text          stringlengths  1 – 1.02k
class_index   int64          0 – 10.8k
source        stringlengths  85 – 188
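Each row below pairs a code snippet (`text`) with an integer label (`class_index`) and the path of the file it was taken from (`source`). As a minimal sketch of how such rows could be previewed, assuming the data is published as a Hugging Face `datasets` dataset (the repository id below is a placeholder, not the real name, which is not given here):

```python
from datasets import load_dataset

# Placeholder repository id; the actual dataset name is not given in this dump.
ds = load_dataset("username/code-classification-dataset", split="train")

# Preview the three columns for the first few rows.
for row in ds.select(range(3)):
    print(row["class_index"], row["source"])
    print(row["text"][:200].replace("\n", " "), "...")
```

A local dump can be loaded the same way, e.g. `load_dataset("parquet", data_files="rows.parquet")`.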
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
    config_path = config if config is not None else pretrained_model_name_or_path
    config, model_kwargs = cls.config_class.from_pretrained(
        config_path,
        cache_dir=cache_dir,
        return_unused_kwargs=True,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        local_files_only=local_files_only,
        token=token,
        revision=revision,
        _from_auto=from_auto_class,
        _from_pipeline=from_pipeline,
        _commit_hash=commit_hash,
        **kwargs,
    )
else:
    model_kwargs = kwargs

if commit_hash is None:
    commit_hash = getattr(config, "_commit_hash", None)
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
# This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the
# index of the files.
is_sharded = False
# Load model
if pretrained_model_name_or_path is not None:
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    is_local = os.path.isdir(pretrained_model_name_or_path)
    if is_local:
        if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
            # Load from a PyTorch checkpoint in priority if from_pt
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
        elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)):
            # Load from a sharded PyTorch checkpoint
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)
            is_sharded = True
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
elif use_safetensors is not False and os.path.isfile(
    os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)
):
    # Load from a safetensors checkpoint
    archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)
elif use_safetensors is not False and os.path.isfile(
    os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)
):
    # Load from a sharded safetensors checkpoint
    archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)
    is_sharded = True
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
    # Load from a TF 2.0 checkpoint
    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME)):
    # Load from a sharded TF 2.0 checkpoint
    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME)
    is_sharded = True
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
# At this stage we don't have a weight file so we will raise an error.
elif use_safetensors:
    raise EnvironmentError(
        f"Error no file named {SAFE_WEIGHTS_NAME} or {SAFE_WEIGHTS_INDEX_NAME} found in directory {pretrained_model_name_or_path}. "
        f"Please make sure that the model has been saved with `safe_serialization=True` or do not "
        f"set `use_safetensors=True`."
    )
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)) or os.path.isfile(
    os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)
):
    raise EnvironmentError(
        f"Error no file named {TF2_WEIGHTS_NAME} or {SAFE_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} "
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
"but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those " "weights." ) else: raise EnvironmentError( f"Error no file named {TF2_WEIGHTS_NAME}, {SAFE_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_model_name_or_path}." ) elif os.path.isfile(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path is_local = True elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else:
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
# set correct filename
if from_pt:
    filename = WEIGHTS_NAME
elif use_safetensors is not False:
    filename = SAFE_WEIGHTS_NAME
else:
    filename = TF2_WEIGHTS_NAME
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
try: # Load from URL or cache if already cached cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "resume_download": resume_download, "local_files_only": local_files_only, "token": token, "user_agent": user_agent, "revision": revision, "subfolder": subfolder, "_raise_exceptions_for_gated_repo": False, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, } resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
# Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None # result when internet is up, the repo and revision exist, but the file does not. if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME: # Did not find the safetensors file, let's fallback to TF. # No support for sharded safetensors yet, so we'll raise an error if that's all we find. filename = TF2_WEIGHTS_NAME resolved_archive_file = cached_file( pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **cached_file_kwargs ) if resolved_archive_file is None and filename == TF2_WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file(
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME, **cached_file_kwargs ) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None and filename == WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs ) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None: # Otherwise, maybe there is a PyTorch or Flax model file. We try those to give a helpful error # message. has_file_kwargs = { "revision": revision,
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
"proxies": proxies, "token": token, "cache_dir": cache_dir, "local_files_only": local_files_only, } if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs): is_sharded = True elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {TF2_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to" " load this model from those weights." ) else: raise EnvironmentError(
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME}," f" {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}" )
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
except EnvironmentError:
    # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
    # to the original exception.
    raise
except Exception:
    # For any other exception, we throw a generic error.
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}" ) if is_local: logger.info(f"loading weights file {archive_file}") resolved_archive_file = archive_file filename = resolved_archive_file.split(os.path.sep)[-1] else: logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") else: resolved_archive_file = None
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
# We'll need to download and cache each checkpoint shard if the checkpoint is sharded. if is_sharded: # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, _commit_hash=commit_hash, )
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
safetensors_from_pt = False if filename == SAFE_WEIGHTS_NAME: with safe_open(resolved_archive_file, framework="tf") as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax", "mlx"]: raise OSError( f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata." " Make sure you save your model with the `save_pretrained` method." ) safetensors_from_pt = safetensors_metadata.get("format") == "pt" elif filename == SAFE_WEIGHTS_INDEX_NAME: with safe_open(resolved_archive_file[0], framework="tf") as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax", "mlx"]: raise OSError(
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata." " Make sure you save your model with the `save_pretrained` method." ) safetensors_from_pt = safetensors_metadata.get("format") == "pt"
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
config.name_or_path = pretrained_model_name_or_path # composed models, *e.g.* TFRag, require special treatment when it comes to loading # pre-trained weights. if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None: model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name") # Instantiate model. model = cls(config, *model_args, **model_kwargs) if tf_to_pt_weight_rename is None and hasattr(model, "tf_to_pt_weight_rename"): # TODO Matt: This is a temporary workaround to allow weight renaming, but requires a method # to be defined for each class that requires a rename. We can probably just have a class-level # dict and a single top-level method or something and cut down a lot of boilerplate code tf_to_pt_weight_rename = model.tf_to_pt_weight_rename
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
if from_pt: from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model # Load from a PyTorch checkpoint return load_pytorch_checkpoint_in_tf2_model( model, resolved_archive_file, allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) # we might need to extend the variable scope for composite models if load_weight_prefix is not None: with tf.compat.v1.variable_scope(load_weight_prefix): model.build_in_name_scope() # build the network with dummy inputs else: model.build_in_name_scope() # build the network with dummy inputs if safetensors_from_pt and not is_sharded: from .modeling_tf_pytorch_utils import load_pytorch_state_dict_in_tf2_model
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
with safe_open(resolved_archive_file, framework="tf") as safetensors_archive: # Load from a PyTorch safetensors checkpoint # We load in TF format here because PT weights often need to be transposed, and this is much # faster on GPU. Loading as numpy and transposing on CPU adds several seconds to load times. return load_pytorch_state_dict_in_tf2_model( model, safetensors_archive, tf_inputs=False, # No need to build the model again allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, ignore_mismatched_sizes=ignore_mismatched_sizes, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) elif safetensors_from_pt: from .modeling_tf_pytorch_utils import load_sharded_pytorch_safetensors_in_tf2_model
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
return load_sharded_pytorch_safetensors_in_tf2_model(
    model,
    resolved_archive_file,
    tf_inputs=False,
    allow_missing_keys=True,
    output_loading_info=output_loading_info,
    _prefix=load_weight_prefix,
    ignore_mismatched_sizes=ignore_mismatched_sizes,
    tf_to_pt_weight_rename=tf_to_pt_weight_rename,
)
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
# 'by_name' allows us to do transfer learning by skipping/adding layers
# see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
try:
    if is_sharded:
        for file in resolved_archive_file:
            assert os.path.isfile(file), f"Error retrieving files {file}"

        if filename == SAFE_WEIGHTS_INDEX_NAME:
            missing_keys, unexpected_keys, mismatched_keys = load_tf_sharded_weights_from_safetensors(
                model,
                resolved_archive_file,
                ignore_mismatched_sizes=ignore_mismatched_sizes,
                _prefix=load_weight_prefix,
            )
        else:
            missing_keys, unexpected_keys, mismatched_keys = load_tf_sharded_weights(
                model,
                resolved_archive_file,
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) else: # Handles both H5 and safetensors missing_keys, unexpected_keys, mismatched_keys = load_tf_weights( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) except OSError as e: try: with open(resolved_archive_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else:
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
            raise ValueError from e
except (UnicodeDecodeError, ValueError):
    raise OSError(
        "Unable to load weights from h5 file. "
        "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
    )
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
if cls._keys_to_ignore_on_load_missing is not None:
    for pat in cls._keys_to_ignore_on_load_missing:
        missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

if cls._keys_to_ignore_on_load_unexpected is not None:
    for pat in cls._keys_to_ignore_on_load_unexpected:
        unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
if len(unexpected_keys) > 0: logger.warning( f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
if len(missing_keys) > 0: logger.warning( f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) elif len(mismatched_keys) == 0: logger.warning( f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." ) if len(mismatched_keys) > 0: mismatched_warning = "\n".join( [
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" " to use it for predictions and inference." )
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
# If it is a model with generation capabilities, attempt to load the generation config if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained( pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) except OSError: logger.info( "Generation config file not found, using a generation config created from the model config." ) pass
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
if output_loading_info: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, } return model, loading_info return model def push_to_hub( self, repo_id: str, use_temp_dir: Optional[bool] = None, commit_message: Optional[str] = None, private: Optional[bool] = None, max_shard_size: Optional[Union[int, str]] = "10GB", token: Optional[Union[bool, str]] = None, # (`use_auth_token` is deprecated: we have to keep it here as we don't have **kwargs) use_auth_token: Optional[Union[bool, str]] = None, create_pr: bool = False, **base_model_card_args, ) -> str: """ Upload the model files to the 🤗 Model Hub while synchronizing a local clone of the repo in `repo_path_or_name`.
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
Parameters: repo_id (`str`): The name of the repository you want to push your model to. It should contain your organization name when pushing to a given organization. use_temp_dir (`bool`, *optional*): Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to `True` if there is no directory named like `repo_id`, `False` otherwise. commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload model"`. private (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. token (`bool` or `str`, *optional*):
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit.
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
Examples: ```python from transformers import TFAutoModel model = TFAutoModel.from_pretrained("google-bert/bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert". model.push_to_hub("my-finetuned-bert") # Push the model to an organization with the name "my-finetuned-bert". model.push_to_hub("huggingface/my-finetuned-bert") ``` """ if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
if "repo_path_or_name" in base_model_card_args: warnings.warn( "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use " "`repo_id` instead." ) repo_id = base_model_card_args.pop("repo_path_or_name") # Deprecation warning will be sent after for repo_url and organization repo_url = base_model_card_args.pop("repo_url", None) organization = base_model_card_args.pop("organization", None) if os.path.isdir(repo_id): working_dir = repo_id repo_id = repo_id.split(os.path.sep)[-1] else: working_dir = repo_id.split("/")[-1] repo_id = self._create_repo( repo_id, private=private, token=token, repo_url=repo_url, organization=organization ) if use_temp_dir is None: use_temp_dir = not os.path.isdir(working_dir)
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir: files_timestamps = self._get_files_timestamps(work_dir) # Save all files. self.save_pretrained(work_dir, max_shard_size=max_shard_size) if hasattr(self, "history") and hasattr(self, "create_model_card"): # This is a Keras model and we might be able to fish out its History and make a model card out of it base_model_card_args = { "output_dir": work_dir, "model_name": Path(repo_id).name, } base_model_card_args.update(base_model_card_args) self.create_model_card(**base_model_card_args) self._upload_modified_files( work_dir, repo_id, files_timestamps, commit_message=commit_message, token=token, create_pr=create_pr, )
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
@classmethod def register_for_auto_class(cls, auto_class="TFAutoModel"): """ Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`): The auto class to register this new model with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class
281
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
class TFConv1D(keras.layers.Layer): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (`int`): The number of output features. nx (`int`): The number of input features. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`. """ def __init__(self, nf, nx, initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.nf = nf self.nx = nx self.initializer_range = initializer_range
282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
def build(self, input_shape):
    if self.built:
        return
    self.built = True
    self.weight = self.add_weight(
        "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
    )
    self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())

def call(self, x):
    bz, sl = shape_list(x)[:2]
    x = tf.reshape(x, [-1, self.nx])
    x = tf.matmul(x, self.weight) + self.bias
    x = tf.reshape(x, [bz, sl, self.nf])
    return x
282
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
class TFSharedEmbeddings(keras.layers.Layer):
    r"""
    Construct shared token embeddings.

    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
    modeling.

    Args:
        vocab_size (`int`):
            The size of the vocabulary, e.g., the number of unique tokens.
        hidden_size (`int`):
            The size of the embedding vectors.
        initializer_range (`float`, *optional*):
            The standard deviation to use when initializing the weights. If no value is provided, it will default to
            \\(1/\sqrt{hidden\_size}\\).
        kwargs (`Dict[str, Any]`, *optional*):
            Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`.
    """

    # TODO (joao): flagged for deletion due to embeddings refactor
283
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size**-0.5 if initializer_range is None else initializer_range warnings.warn( "`TFSharedEmbeddings` is scheduled for deletion in v4.32, use `keras.layers.Embedding` instead.", DeprecationWarning, ) def build(self, input_shape): """ Build shared token embedding layer Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ self.weight = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range) ) super().build(input_shape)
283
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
def get_config(self): config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor: """ Get token embeddings of inputs or decode final hidden state. Args: inputs (`tf.Tensor`): In embedding mode, should be an int64 tensor with shape `[batch_size, length]`. In linear mode, should be a float tensor with shape `[batch_size, length, hidden_size]`. mode (`str`, defaults to `"embedding"`): A valid value is either `"embedding"` or `"linear"`, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder.
283
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
Returns: `tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape `[batch_size, length, embedding_size]`. In linear mode, the output is a float32 with shape `[batch_size, length, vocab_size]`. Raises: ValueError: if `mode` is not valid. Shared weights logic is adapted from [here](https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24). """ if mode == "embedding": return self._embedding(inputs) elif mode == "linear": return self._linear(inputs) else: raise ValueError(f"mode {mode} is not valid.") def _embedding(self, input_ids): """Applies embedding based on inputs tensor.""" return tf.gather(self.weight, input_ids) def _linear(self, inputs): """ Computes logits by running inputs through a linear layer.
283
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
Args: inputs: A float32 tensor with shape [..., hidden_size] Returns: float32 tensor with shape [..., vocab_size]. """ first_dims = shape_list(inputs)[:-1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.weight, transpose_b=True) return tf.reshape(logits, first_dims + [self.vocab_size])
283
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
class TFSequenceSummary(keras.layers.Layer): """ Compute a single vector summary of a sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention
284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`. """
284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs): super().__init__(**kwargs) self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last" if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError
284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj if self.has_summary: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = keras.layers.Dense( num_classes, kernel_initializer=get_initializer(initializer_range), name="summary" ) self.has_activation = False activation_string = getattr(config, "summary_activation", None) if activation_string is not None: self.has_activation = True self.activation = get_tf_activation(activation_string) self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0 if self.has_first_dropout: self.first_dropout = keras.layers.Dropout(config.summary_first_dropout)
284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0 if self.has_last_dropout: self.last_dropout = keras.layers.Dropout(config.summary_last_dropout) self.hidden_size = config.hidden_size def call(self, inputs, cls_index=None, training=False): if not isinstance(inputs, (dict, tuple, list)): hidden_states = inputs elif isinstance(inputs, (tuple, list)): hidden_states = inputs[0] cls_index = inputs[1] if len(inputs) > 1 else None assert len(inputs) <= 2, "Too many inputs." else: hidden_states = inputs.get("hidden_states") cls_index = inputs.get("cls_index", None)
284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == "cls_index": hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims] if cls_index is None: cls_index = tf.fill( hidden_shape[:-2], hidden_shape[-2] - 1 ) # A tensor full of shape [batch] or [batch, num choices] full of sequence length cls_shape = shape_list(cls_index) if len(cls_shape) <= len(hidden_shape) - 2: cls_index = tf.expand_dims(cls_index, axis=-1) # else: # cls_index = cls_index[..., tf.newaxis] # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2) output = tf.squeeze( output, axis=len(hidden_shape) - 2 ) # shape of output: (batch, num choices, hidden_size) elif self.summary_type == "attn": raise NotImplementedError
284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
if self.has_first_dropout: output = self.first_dropout(output, training=training) if self.has_summary: output = self.summary(output) if self.has_activation: output = self.activation(output) if self.has_last_dropout: output = self.last_dropout(output, training=training) return output def build(self, input_shape): if self.built: return self.built = True if getattr(self, "summary", None) is not None: with tf.name_scope("summary"): self.summary.build(self.hidden_size)
284
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_utils.py
class DebugUnderflowOverflow:
    """
    This debug class helps detect and understand where the model starts getting very large or very small, and more
    importantly `nan` or `inf` weight and activation elements.

    There are 2 working modes:

    1. Underflow/overflow detection (default)
    2. Specific batch absolute min/max tracing without detection

    Mode 1: Underflow/overflow detection

    To activate the underflow/overflow detection, initialize the object with the model :

    ```python
    debug_overflow = DebugUnderflowOverflow(model)
    ```

    then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or
    output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this
    event, each frame reporting

    1. the fully qualified module name plus the class name whose `forward` was run
    2. the absolute min and max value of all elements for each module weights, and the inputs and output
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
    For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16
    mixed precision :

    ```
    Detected inf/nan during batch_number=0
    Last 21 forward frames:
    abs min  abs max  metadata
    [...]
                      encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
    2.17e-07 4.50e+00 weight
    1.79e-06 4.65e+00 input[0]
    2.68e-06 3.70e+01 output
                      encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
    8.08e-07 2.66e+01 weight
    1.79e-06 4.65e+00 input[0]
    1.27e-04 2.37e+02 output
                      encoder.block.2.layer.1.DenseReluDense.wo Linear
    1.01e-06 6.44e+00 weight
    0.00e+00 9.74e+03 input[0]
    3.18e-04 6.27e+04 output
                      encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
    1.79e-06 4.65e+00 input[0]
    3.18e-04 6.27e+04 output
                      encoder.block.2.layer.1.dropout Dropout
    3.18e-04 6.27e+04 input[0]
    0.00e+00      inf output
    ```
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
    You can see here that `T5DenseGatedGeluDense.forward` resulted in output activations whose absolute max value was
    around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which
    renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than
    64K, and we get an overflow.

    As you can see it's the previous frames that we need to look into when the numbers start getting very large for
    fp16.

    The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.

    By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :

    ```python
    debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
    ```
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
    To validate that you have set up this debugging feature correctly, and you intend to use it in a training that
    may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the
    next section.

    Mode 2. Specific batch absolute min/max tracing without detection

    The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.

    Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a
    given batch, and only do that for batches 1 and 3. Then you instantiate this class as :

    ```python
    debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
    ```

    And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.

    This is helpful if you know that the program starts misbehaving after a certain batch number, so you can
    fast-forward right to that area.
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
    Early stopping:

    You can also specify the batch number after which to stop the training, with :

    ```python
    debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
    ```

    This feature is mainly useful in the tracing mode, but you can use it for any mode.

    **Performance**:

    As this module measures absolute `min`/`max` of each weight of the model on every forward it'll slow the training
    down. Therefore remember to turn it off once the debugging needs have been met.

    Args:
        model (`nn.Module`):
            The model to debug.
        max_frames_to_save (`int`, *optional*, defaults to 21):
            How many frames back to record
        trace_batch_nums (`List[int]`, *optional*, defaults to `[]`):
            Which batch numbers to trace (turns detection off)
        abort_after_batch_num (`int`, *optional*):
            Whether to abort after a certain batch number has finished
    """
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None): self.model = model self.trace_batch_nums = trace_batch_nums self.abort_after_batch_num = abort_after_batch_num # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence self.frames = collections.deque([], max_frames_to_save) self.frame = [] self.batch_number = 0 self.total_calls = 0 self.detected_overflow = False self.prefix = " " self.analyse_model() self.register_forward_hook() def save_frame(self, frame=None): if frame is not None: self.expand_frame(frame) self.frames.append("\n".join(self.frame)) self.frame = [] # start a new frame def expand_frame(self, line): self.frame.append(line) def trace_frames(self): print("\n".join(self.frames)) self.frames = []
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
def reset_saved_frames(self): self.frames = [] def dump_saved_frames(self): print(f"\nDetected inf/nan during batch_number={self.batch_number}") print(f"Last {len(self.frames)} forward frames:") print(f"{'abs min':8} {'abs max':8} metadata") print("\n".join(self.frames)) print("\n\n") self.frames = [] def analyse_model(self): # extract the fully qualified module names, to be able to report at run time. e.g.: # encoder.block.2.layer.0.SelfAttention.o # # for shared weights only the first shared module name will be registered self.module_names = {m: name for name, m in self.model.named_modules()} # self.longest_module_name = max(len(v) for v in self.module_names.values())
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
def analyse_variable(self, var, ctx): if torch.is_tensor(var): self.expand_frame(get_abs_min_max(var, ctx)) if detect_overflow(var, ctx): self.detected_overflow = True elif var is None: self.expand_frame(f"{'None':>17} {ctx}") else: self.expand_frame(f"{'not a tensor':>17} {ctx}") def batch_start_frame(self): self.expand_frame(f"\n\n{self.prefix} *** Starting batch number={self.batch_number} ***") self.expand_frame(f"{'abs min':8} {'abs max':8} metadata") def batch_end_frame(self): self.expand_frame(f"{self.prefix} *** Finished batch number={self.batch_number-1} ***\n\n") def create_frame(self, module, input, output): self.expand_frame(f"{self.prefix} {self.module_names[module]} {module.__class__.__name__}") # params for name, p in module.named_parameters(recurse=False): self.analyse_variable(p, name)
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
# inputs if isinstance(input, tuple): for i, x in enumerate(input): self.analyse_variable(x, f"input[{i}]") else: self.analyse_variable(input, "input") # outputs if isinstance(output, tuple): for i, x in enumerate(output): # possibly a tuple of tuples if isinstance(x, tuple): for j, y in enumerate(x): self.analyse_variable(y, f"output[{i}][{j}]") else: self.analyse_variable(x, f"output[{i}]") else: self.analyse_variable(output, "output") self.save_frame() def register_forward_hook(self): self.model.apply(self._register_forward_hook) def _register_forward_hook(self, module): module.register_forward_hook(self.forward_hook)
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
def forward_hook(self, module, input, output): # - input is a tuple of packed inputs (could be non-Tensors) # - output could be a Tensor or a tuple of Tensors and non-Tensors last_frame_of_batch = False trace_mode = True if self.batch_number in self.trace_batch_nums else False if trace_mode: self.reset_saved_frames() if self.total_calls == 0: self.batch_start_frame() self.total_calls += 1 # count batch numbers - the very first forward hook of the batch will be called when the # batch completes - i.e. it gets called very last - we know this batch has finished if module == self.model: self.batch_number += 1 last_frame_of_batch = True self.create_frame(module, input, output) # if last_frame_of_batch: # self.batch_end_frame() if trace_mode: self.trace_frames()
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
if last_frame_of_batch: self.batch_start_frame() if self.detected_overflow and not trace_mode: self.dump_saved_frames() # now we can abort, as it's pointless to continue running raise ValueError( "DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. " "Please scroll up above this traceback to see the activation values prior to this event." ) # abort after certain batch if requested to do so if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num: raise ValueError( f"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to" f" `abort_after_batch_num={self.abort_after_batch_num}` arg" )
285
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
class DebugOption(ExplicitEnum):
    UNDERFLOW_OVERFLOW = "underflow_overflow"
    TPU_METRICS_DEBUG = "tpu_metrics_debug"
286
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/debug_utils.py
class TFBaseModelOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
287
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
class TFBaseModelOutputWithNoAttention(ModelOutput): """ Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`tf.Tensor` shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
288
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
class TFBaseModelOutputWithPooling(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.
289
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
289
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
    Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
    heads.
"""

last_hidden_state: tf.Tensor = None
pooler_output: tf.Tensor = None
hidden_states: Tuple[tf.Tensor] | None = None
attentions: Tuple[tf.Tensor] | None = None
289
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
class TFBaseModelOutputWithPoolingAndNoAttention(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state after a pooling operation on the spatial dimensions. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """
290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
last_hidden_state: tf.Tensor = None
pooler_output: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
290
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
class TFBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.
291
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
291
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """
291
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
last_hidden_state: tf.Tensor = None pooler_output: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None
291
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
class TFBaseModelOutputWithPast(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`).
292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
    Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
    heads.
"""

last_hidden_state: tf.Tensor = None
past_key_values: List[tf.Tensor] | None = None
hidden_states: Tuple[tf.Tensor] | None = None
attentions: Tuple[tf.Tensor] | None = None
292
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
class TFBaseModelOutputWithCrossAttentions(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
293
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """
293
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None
293
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
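The optional fields only show up in the tuple form of the output when they were actually populated; a small sketch with dummy tensors and illustrative shapes:

import tensorflow as tf
from transformers.modeling_tf_outputs import TFBaseModelOutputWithCrossAttentions

# Illustrative shapes: 2 layers, batch_size=1, num_heads=2, sequence_length=4, hidden_size=8.
attentions = tuple(tf.random.normal((1, 2, 4, 4)) for _ in range(2))

out = TFBaseModelOutputWithCrossAttentions(
    last_hidden_state=tf.random.normal((1, 4, 8)),
    attentions=attentions,
)

# to_tuple() keeps only the non-None fields, in declaration order:
# here (last_hidden_state, attentions); hidden_states and cross_attentions are dropped.
print(len(out.to_tuple()))  # 2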
class TFBaseModelOutputWithPastAndCrossAttentions(ModelOutput): """ Base class for model's outputs that may also contain past key/values (to speed up sequential decoding). Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`.
294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None
294
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
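A construction sketch combining the cache and both attention-map fields, with dummy tensors and illustrative dimensions:

import tensorflow as tf
from transformers.modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions

# Illustrative dimensions: 2 layers, batch_size=1, num_heads=2, sequence_length=4,
# hidden_size=8, embed_size_per_head=4.
out = TFBaseModelOutputWithPastAndCrossAttentions(
    last_hidden_state=tf.random.normal((1, 4, 8)),
    past_key_values=[tf.random.normal((2, 1, 2, 4, 4)) for _ in range(2)],
    attentions=tuple(tf.random.normal((1, 2, 4, 4)) for _ in range(2)),
    cross_attentions=tuple(tf.random.normal((1, 2, 4, 4)) for _ in range(2)),
)

# Self-attention and cross-attention maps are reported per layer, in the same order as the layers.
print(len(out.attentions), len(out.cross_attentions))  # 2 2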
class TFSeq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contain pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`.
295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None
295
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
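A minimal sketch showing how the decoder-side and encoder-side fields are split (dummy tensors; source length 7 and target length 3 are illustrative):

import tensorflow as tf
from transformers.modeling_tf_outputs import TFSeq2SeqModelOutput

# Illustrative shapes: batch_size=1, encoder (source) length=7, decoder (target) length=3, hidden_size=8.
out = TFSeq2SeqModelOutput(
    last_hidden_state=tf.random.normal((1, 3, 8)),          # decoder output
    encoder_last_hidden_state=tf.random.normal((1, 7, 8)),  # encoder output
)

# `last_hidden_state` always refers to the decoder; encoder-side fields carry the `encoder_` prefix.
print(out.last_hidden_state.shape)          # (1, 3, 8)
print(out.encoder_last_hidden_state.shape)  # (1, 7, 8)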
class TFCausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
296
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
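Since the logits are pre-softmax scores over the vocabulary, a greedy decoding step simply takes the argmax at the last position; a sketch with dummy values:

import tensorflow as tf
from transformers.modeling_tf_outputs import TFCausalLMOutput

# Illustrative shapes: batch_size=1, sequence_length=5, vocab_size=100.
out = TFCausalLMOutput(logits=tf.random.normal((1, 5, 100)))

# Greedy next-token choice: highest-scoring vocabulary entry at the final position.
next_token_id = tf.argmax(out.logits[:, -1, :], axis=-1)
print(next_token_id.shape)  # (1,)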
class TFCausalLMOutputWithPast(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`.
297
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
297
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
297
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
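For reference, a TF causal LM called with `use_cache=True` returns this class; the sketch below assumes the `gpt2` checkpoint is available for download and that tensorflow is installed:

from transformers import AutoTokenizer, TFAutoModelForCausalLM

# Assumed checkpoint; any TF causal-LM checkpoint with a cache behaves the same way.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFAutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("The quick brown fox", return_tensors="tf")
outputs = model(**inputs, use_cache=True)  # a TFCausalLMOutputWithPast

print(outputs.logits.shape)          # (batch_size, sequence_length, vocab_size)
print(len(outputs.past_key_values))  # one cached key/value tensor per layer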
class TFCausalLMOutputWithCrossAttentions(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None
298
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
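A construction sketch with dummy tensors, following the shapes given in the docstring:

import tensorflow as tf
from transformers.modeling_tf_outputs import TFCausalLMOutputWithCrossAttentions

# Illustrative dimensions: 2 layers, batch_size=1, num_heads=2, sequence_length=4, vocab_size=50.
out = TFCausalLMOutputWithCrossAttentions(
    logits=tf.random.normal((1, 4, 50)),
    cross_attentions=tuple(tf.random.normal((1, 2, 4, 4)) for _ in range(2)),
)

# In real model outputs each map is a post-softmax distribution over the attended positions;
# the random values above are placeholders.
print(len(out.cross_attentions), out.cross_attentions[0].shape)  # 2 (1, 2, 4, 4)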
class TFMaskedLMOutput(ModelOutput): """ Base class for masked language model outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
299
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
299
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
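For reference, a TF masked-LM forward pass returns this class; the sketch below assumes the `bert-base-uncased` checkpoint is available for download and reads off the highest-scoring token at the mask position:

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = TFAutoModelForMaskedLM.from_pretrained("bert-base-uncased")

inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
outputs = model(**inputs)  # a TFMaskedLMOutput

# logits: (batch_size, sequence_length, vocab_size); read off the prediction at the [MASK] position.
mask_position = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(outputs.logits[0, mask_position]))
print(tokenizer.decode([predicted_id]))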
class TFSeq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language model outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`.
300
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py