# source: src/transformers/training_args.py
            <Tip>

            `nan_inf_filter` only influences the logging of loss values; it does not change how the gradient is computed or applied to the model.

            </Tip>

        on_each_node (`bool`, *optional*, defaults to `True`):
            In multinode distributed training, whether to log using `log_level` once per node, or only on the main node.
        replica_level (`str`, *optional*, defaults to `"passive"`):
            Logger log level to use on replicas. Same choices as `log_level`.

    Example:

    ```py
    >>> from transformers import TrainingArguments
    >>> args = TrainingArguments("working_dir")
    >>> args = args.set_logging(strategy="steps", steps=100)
    >>> args.logging_steps
    100
    ```
    """
    self.logging_strategy = IntervalStrategy(strategy)
    if self.logging_strategy == IntervalStrategy.STEPS and steps == 0:
        raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.")
    self.logging_steps = steps
    self.report_to = report_to
    self.log_level = level
    self.logging_first_step = first_step
    self.logging_nan_inf_filter = nan_inf_filter
    self.log_on_each_node = on_each_node
    self.log_level_replica = replica_level
    return self
def set_push_to_hub(
    self,
    model_id: str,
    strategy: Union[str, HubStrategy] = "every_save",
    token: Optional[str] = None,
    private_repo: Optional[bool] = None,
    always_push: bool = False,
):
    """
    A method that regroups all arguments linked to synchronizing checkpoints with the Hub.

    <Tip>

    Calling this method will set `self.push_to_hub` to `True`, which means the `output_dir` will become a git directory synced with the repo (determined by `model_id`) and the content will be pushed each time a save is triggered (depending on your `self.save_strategy`). Calling [`~Trainer.save_model`] will also trigger a push.

    </Tip>
    Args:
        model_id (`str`):
            The name of the repository to keep in sync with the local *output_dir*. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance `"user_name/model"`, which allows you to push to an organization you are a member of with `"organization_name/model"`.
        strategy (`str` or [`~trainer_utils.HubStrategy`], *optional*, defaults to `"every_save"`):
            Defines the scope of what is pushed to the Hub and when. Possible values are:
            - `"end"`: push the model, its configuration, the processing_class (e.g. the tokenizer, if passed along to the [`Trainer`]) and a draft of a model card when the [`~Trainer.save_model`] method is called.
            - `"every_save"`: push the model, its configuration, the processing_class (e.g. the tokenizer, if passed along to the [`Trainer`]) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the saves are very frequent, a new push is only attempted if the previous one is finished. A last push is made with the final model at the end of training.
            - `"checkpoint"`: like `"every_save"` but the latest checkpoint is also pushed in a subfolder named `last-checkpoint`, allowing you to resume training easily with `trainer.train(resume_from_checkpoint="last-checkpoint")`.
            - `"all_checkpoints"`: like `"checkpoint"` but all checkpoints are pushed as they appear in the output folder (so you will get one checkpoint folder per folder in your final repository).
        token (`str`, *optional*):
            The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with `huggingface-cli login`.
        private_repo (`bool`, *optional*):
            Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
        always_push (`bool`, *optional*, defaults to `False`):
            Unless this is `True`, the `Trainer` will skip pushing a checkpoint when the previous push is not finished.

    Example:

    ```py
    >>> from transformers import TrainingArguments
    >>> args = TrainingArguments("working_dir")
    >>> args = args.set_push_to_hub("me/awesome-model")
    >>> args.hub_model_id
    'me/awesome-model'
    ```
    """
    self.push_to_hub = True
    self.hub_model_id = model_id
    self.hub_strategy = HubStrategy(strategy)
    self.hub_token = token
    self.hub_private_repo = private_repo
    self.hub_always_push = always_push
    return self


def set_optimizer(
    self,
    name: Union[str, OptimizerNames] = "adamw_torch",
    learning_rate: float = 5e-5,
    weight_decay: float = 0,
    beta1: float = 0.9,
    beta2: float = 0.999,
    epsilon: float = 1e-8,
    args: Optional[str] = None,
):
    """
    A method that regroups all arguments linked to the optimizer and its hyperparameters.
    Args:
        name (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"`):
            The optimizer to use: `"adamw_hf"`, `"adamw_torch"`, `"adamw_torch_fused"`, `"adamw_apex_fused"`, `"adamw_anyprecision"` or `"adafactor"`.
        learning_rate (`float`, *optional*, defaults to 5e-5):
            The initial learning rate.
        weight_decay (`float`, *optional*, defaults to 0):
            The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights.
        beta1 (`float`, *optional*, defaults to 0.9):
            The beta1 hyperparameter for the adam optimizer or its variants.
        beta2 (`float`, *optional*, defaults to 0.999):
            The beta2 hyperparameter for the adam optimizer or its variants.
        epsilon (`float`, *optional*, defaults to 1e-8):
            The epsilon hyperparameter for the adam optimizer or its variants.
        args (`str`, *optional*):
            Optional arguments that are supplied to AnyPrecisionAdamW (only useful when `optim="adamw_anyprecision"`).
    Example:

    ```py
    >>> from transformers import TrainingArguments

    >>> args = TrainingArguments("working_dir")
    >>> args = args.set_optimizer(name="adamw_torch", beta1=0.8)
    >>> args.optim
    'adamw_torch'
    ```
    """
    self.optim = OptimizerNames(name)
    self.learning_rate = learning_rate
    self.weight_decay = weight_decay
    self.adam_beta1 = beta1
    self.adam_beta2 = beta2
    self.adam_epsilon = epsilon
    self.optim_args = args
    return self


def set_lr_scheduler(
    self,
    name: Union[str, SchedulerType] = "linear",
    num_epochs: float = 3.0,
    max_steps: int = -1,
    warmup_ratio: float = 0,
    warmup_steps: int = 0,
):
    """
    A method that regroups all arguments linked to the learning rate scheduler and its hyperparameters.
    Args:
        name (`str` or [`SchedulerType`], *optional*, defaults to `"linear"`):
            The scheduler type to use. See the documentation of [`SchedulerType`] for all possible values.
        num_epochs (`float`, *optional*, defaults to 3.0):
            Total number of training epochs to perform (if not an integer, the decimal part is run as a fraction of a final epoch before training stops).
        max_steps (`int`, *optional*, defaults to -1):
            If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached.
        warmup_ratio (`float`, *optional*, defaults to 0.0):
            Ratio of total training steps used for a linear warmup from 0 to `learning_rate`.
        warmup_steps (`int`, *optional*, defaults to 0):
            Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`.
    Example:

    ```py
    >>> from transformers import TrainingArguments

    >>> args = TrainingArguments("working_dir")
    >>> args = args.set_lr_scheduler(name="cosine", warmup_ratio=0.05)
    >>> args.warmup_ratio
    0.05
    ```
    """
    self.lr_scheduler_type = SchedulerType(name)
    self.num_train_epochs = num_epochs
    self.max_steps = max_steps
    self.warmup_ratio = warmup_ratio
    self.warmup_steps = warmup_steps
    return self
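The precedence between `warmup_ratio` and `warmup_steps` described above can be made concrete with a small sketch. This is not the library implementation; it only assumes the documented behavior (a positive `warmup_steps` overrides the ratio) and, as a further assumption, that the ratio is converted into a whole number of steps with a ceiling:

```py
import math


def resolve_warmup_steps(num_training_steps: int, warmup_steps: int = 0, warmup_ratio: float = 0.0) -> int:
    # A positive `warmup_steps` overrides any effect of `warmup_ratio`, as documented above.
    if warmup_steps > 0:
        return warmup_steps
    # Assumption for illustration: the ratio is turned into a step count with a ceiling.
    return math.ceil(num_training_steps * warmup_ratio)


print(resolve_warmup_steps(1000, warmup_ratio=0.05))                    # 50
print(resolve_warmup_steps(1000, warmup_steps=200, warmup_ratio=0.05))  # 200
```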
def set_dataloader(
    self,
    train_batch_size: int = 8,
    eval_batch_size: int = 8,
    drop_last: bool = False,
    num_workers: int = 0,
    pin_memory: bool = True,
    persistent_workers: bool = False,
    prefetch_factor: Optional[int] = None,
    auto_find_batch_size: bool = False,
    ignore_data_skip: bool = False,
    sampler_seed: Optional[int] = None,
):
    """
    A method that regroups all arguments linked to the dataloaders creation.
    Args:
        drop_last (`bool`, *optional*, defaults to `False`):
            Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.
        num_workers (`int`, *optional*, defaults to 0):
            Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.
        pin_memory (`bool`, *optional*, defaults to `True`):
            Whether you want to pin memory in data loaders or not. Will default to `True`.
        persistent_workers (`bool`, *optional*, defaults to `False`):
            If `True`, the data loader will not shut down the worker processes after a dataset has been consumed once, keeping the worker `Dataset` instances alive. Can potentially speed up training, but will increase RAM usage. Will default to `False`.
        prefetch_factor (`int`, *optional*):
            Number of batches loaded in advance by each worker. 2 means there will be a total of 2 * num_workers batches prefetched across all workers.
        auto_find_batch_size (`bool`, *optional*, defaults to `False`):
            Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. Requires accelerate to be installed (`pip install accelerate`).
        ignore_data_skip (`bool`, *optional*, defaults to `False`):
            When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to `True`, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.
        sampler_seed (`int`, *optional*):
            Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as `self.seed`. This can be used to ensure reproducibility of data sampling, independent of the model seed.
    Example:

    ```py
    >>> from transformers import TrainingArguments

    >>> args = TrainingArguments("working_dir")
    >>> args = args.set_dataloader(train_batch_size=16, eval_batch_size=64)
    >>> args.per_device_train_batch_size
    16
    ```
    """
    self.per_device_train_batch_size = train_batch_size
    self.per_device_eval_batch_size = eval_batch_size
    self.dataloader_drop_last = drop_last
    self.dataloader_num_workers = num_workers
    self.dataloader_pin_memory = pin_memory
    self.dataloader_persistent_workers = persistent_workers
    self.dataloader_prefetch_factor = prefetch_factor
    self.auto_find_batch_size = auto_find_batch_size
    self.ignore_data_skip = ignore_data_skip
    self.data_seed = sampler_seed
    return self
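Since every `set_*` helper above returns `self`, the calls can be chained. The following is a usage sketch only (it configures `TrainingArguments` and prints a few of the resulting attributes); it assumes `transformers` and its training dependencies are installed:

```py
from transformers import TrainingArguments

args = (
    TrainingArguments("working_dir")
    .set_logging(strategy="steps", steps=50)
    .set_optimizer(name="adamw_torch", learning_rate=3e-5)
    .set_lr_scheduler(name="cosine", warmup_ratio=0.05)
    .set_dataloader(train_batch_size=16, eval_batch_size=64, num_workers=4)
)

# The chained setters simply fill the corresponding TrainingArguments fields.
print(args.logging_steps, args.optim, args.warmup_ratio, args.per_device_train_batch_size)
```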
class ParallelMode(Enum):
    NOT_PARALLEL = "not_parallel"
    NOT_DISTRIBUTED = "not_distributed"
    DISTRIBUTED = "distributed"
    SAGEMAKER_MODEL_PARALLEL = "sagemaker_model_parallel"
    SAGEMAKER_DATA_PARALLEL = "sagemaker_data_parallel"
    TPU = "tpu"
# source: src/transformers/modeling_gguf_pytorch_utils.py
class GGUFTensor(NamedTuple):
    weights: np.ndarray
    name: str
    metadata: dict
class TensorProcessor:
    def __init__(self, config=None):
        self.config = config or {}

    def process(self, weights, name, **kwargs):
        return GGUFTensor(weights, name, {})
class LlamaTensorProcessor(TensorProcessor):
    def __init__(self, config=None):
        super().__init__(config=config)

    def process(self, weights, name, **kwargs):
        if ".attn_k." in name or ".attn_q." in name:
            num_heads = self.config.get("num_attention_heads")
            num_kv_heads = self.config.get("num_key_value_heads")

            if None in (num_heads, num_kv_heads):
                return GGUFTensor(weights, name, {})
            if ".attn_q." in name:
                weights = self._reverse_permute_weights(weights, num_heads, num_heads)
            elif ".attn_k." in name:
                weights = self._reverse_permute_weights(weights, num_heads, num_kv_heads)
        return GGUFTensor(weights, name, {})
    def _reverse_permute_weights(
        self, weights: np.ndarray, n_head: int, num_kv_heads: Optional[int] = None
    ) -> np.ndarray:
        # Original permutation implementation
        # https://github.com/ggerganov/llama.cpp/blob/a38b884c6c4b0c256583acfaaabdf556c62fabea/convert_hf_to_gguf.py#L1402-L1408
        if num_kv_heads is not None and n_head != num_kv_heads:
            n_head = num_kv_heads

        dim = weights.shape[0] // n_head // 2
        w = weights.reshape(n_head, dim, 2, *weights.shape[1:])
        return w.swapaxes(2, 1).reshape(weights.shape)
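A small self-check of `_reverse_permute_weights`: the forward `permute` below is a reconstruction of the llama.cpp conversion linked in the comment above and should be read as an assumption, not as code from this module. The assertion verifies that the reverse permutation undoes it:

```py
import numpy as np


def permute(weights: np.ndarray, n_head: int) -> np.ndarray:
    # Assumed forward permutation (HF -> GGUF), reconstructed from the llama.cpp reference.
    dim = weights.shape[0] // n_head // 2
    return weights.reshape(n_head, 2, dim, *weights.shape[1:]).swapaxes(1, 2).reshape(weights.shape)


def reverse_permute(weights: np.ndarray, n_head: int) -> np.ndarray:
    # Same arithmetic as LlamaTensorProcessor._reverse_permute_weights (kv-head remapping omitted).
    dim = weights.shape[0] // n_head // 2
    return weights.reshape(n_head, dim, 2, *weights.shape[1:]).swapaxes(2, 1).reshape(weights.shape)


w = np.arange(8 * 16, dtype=np.float32).reshape(8, 16)  # e.g. 2 heads with head_dim 4
assert np.array_equal(reverse_permute(permute(w, n_head=2), n_head=2), w)
```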
class Qwen2MoeTensorProcessor(TensorProcessor):
    def __init__(self, config=None):
        super().__init__(config=config)

    def process(self, weights, name, **kwargs):
        if "_exp" in name:
            tensor_key_mapping = kwargs.get("tensor_key_mapping")
            parsed_parameters = kwargs.get("parsed_parameters")
            if tensor_key_mapping:
                self._split_moe_expert_tensor(weights, parsed_parameters, name, tensor_key_mapping)
            return GGUFTensor(weights, None, {})
        if "ffn_gate_inp_shexp" in name:
            # for compatibility tensor shared_expert_gate must be (1, 2048) dim,
            # quantized one is (2048)
            weights = np.expand_dims(weights, axis=0)
        return GGUFTensor(weights, name, {})
    def _split_moe_expert_tensor(
        self, weights: np.ndarray, parsed_parameters: Dict[str, Dict], name: str, tensor_key_mapping: dict
    ):
        # Original merge implementation
        # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L1994-L2022
        name = tensor_key_mapping[name]
        w_counter = self.config.get("num_experts", 60)
        for i in range(0, w_counter):
            temp_name = name.replace("mlp.experts.", f"mlp.experts.{i}.")
            exp_weight = weights[i]
            parsed_parameters["tensors"][temp_name] = torch.from_numpy(np.copy(exp_weight))
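A toy illustration of the expert split above, with made-up shapes and a hypothetical tensor name (neither comes from this module): a stacked `(num_experts, out_features, in_features)` weight is fanned out into one `mlp.experts.{i}.` entry per expert:

```py
import numpy as np
import torch

num_experts, out_features, in_features = 4, 8, 16
stacked = np.random.rand(num_experts, out_features, in_features).astype(np.float32)

# Hypothetical name, as it might look after tensor_key_mapping has been applied.
name = "model.layers.0.mlp.experts.gate_proj.weight"
tensors = {}
for i in range(num_experts):
    per_expert_name = name.replace("mlp.experts.", f"mlp.experts.{i}.")
    tensors[per_expert_name] = torch.from_numpy(np.copy(stacked[i]))

print(len(tensors), tensors["model.layers.0.mlp.experts.0.gate_proj.weight"].shape)  # 4 torch.Size([8, 16])
```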
class BloomTensorProcessor(TensorProcessor):
    def __init__(self, config=None):
        super().__init__(config=config)

    def process(self, weights, name, **kwargs):
        if "attn_qkv" in name:
            num_heads = self.config["n_head"]
            n_embed = self.config["hidden_size"]
            if "weight" in name:
                weights = self._reverse_reshape_weights(weights, num_heads, n_embed)
            else:
                weights = self._reverse_reshape_bias(weights, num_heads, n_embed)
        return GGUFTensor(weights, name, {})

    def _reverse_reshape_weights(self, weights: np.ndarray, n_head: int, n_embed: int):
        # Original reshape implementation
        # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L972-L985
        q, k, v = np.array_split(weights, 3, axis=0)
        q = q.reshape(n_head, n_embed // n_head, n_embed)
        k = k.reshape(n_head, n_embed // n_head, n_embed)
        v = v.reshape(n_head, n_embed // n_head, n_embed)

        qkv_weights = np.stack([q, k, v], axis=1)

        return qkv_weights.reshape(n_head * 3 * (n_embed // n_head), n_embed)

    def _reverse_reshape_bias(self, weights: np.ndarray, n_head: int, n_embed: int):
        # Original reshape implementation
        # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L986-L998
        q_bias, k_bias, v_bias = np.array_split(weights, 3)

        q_bias = q_bias.reshape(n_head, n_embed // n_head)
        k_bias = k_bias.reshape(n_head, n_embed // n_head)
        v_bias = v_bias.reshape(n_head, n_embed // n_head)

        qkv_bias = np.stack([q_bias, k_bias, v_bias], axis=1).flatten()
        return qkv_bias
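A shape-and-ordering sketch for the fused QKV reshape above, using tiny dimensions. It only restates what the code does: the GGUF weight is stored block-wise as [Q; K; V], and the reshape regroups the rows per attention head:

```py
import numpy as np

n_head, n_embed = 2, 4
head_dim = n_embed // n_head
weights = np.arange(3 * n_embed * n_embed).reshape(3 * n_embed, n_embed)  # [Q-block; K-block; V-block]

q, k, v = np.array_split(weights, 3, axis=0)
q = q.reshape(n_head, head_dim, n_embed)
k = k.reshape(n_head, head_dim, n_embed)
v = v.reshape(n_head, head_dim, n_embed)
out = np.stack([q, k, v], axis=1).reshape(n_head * 3 * head_dim, n_embed)

# After the reshape the rows are grouped per head: Q_h0, K_h0, V_h0, Q_h1, K_h1, V_h1, ...
assert np.array_equal(out[0:2], weights[0:2])    # Q rows of head 0
assert np.array_equal(out[2:4], weights[4:6])    # K rows of head 0
assert np.array_equal(out[4:6], weights[8:10])   # V rows of head 0
```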
class T5TensorProcessor(TensorProcessor):
    def __init__(self, config=None):
        super().__init__(config=config)

    def process(self, weights, name, **kwargs):
        bid = None
        for chunk in name.split("."):
            if chunk.isdigit():
                bid = int(chunk)
                break
        return GGUFTensor(weights, name, {"bid": bid})
class GPT2TensorProcessor(TensorProcessor):
    def __init__(self, config=None):
        super().__init__(config=config)

    def process(self, weights, name, **kwargs):
        # Original transpose implementation
        # https://github.com/ggerganov/llama.cpp/blob/a38b884c6c4b0c256583acfaaabdf556c62fabea/convert_hf_to_gguf.py#L2060-L2061
        if (
            "attn_qkv.weight" in name
            or "ffn_down.weight" in name
            or "ffn_up.weight" in name
            or "attn_output.weight" in name
        ):
            weights = weights.T
        # Handle special case for output.weight
        if name == "output.weight":
            # output.weight has conflicts with attn_output.weight in name checking
            # Store the tensor directly and signal to skip further processing
            name = "lm_head.weight"
            parsed_parameters = kwargs.get("parsed_parameters", {})
            parsed_parameters["tensors"][name] = torch.from_numpy(np.copy(weights))
            name = None  # Signal to skip further processing
        return GGUFTensor(weights, name, {})
class MambaTensorProcessor(TensorProcessor):
    def __init__(self, config=None):
        super().__init__(config=config)

    def process(self, weights, name, **kwargs):
        if "ssm_conv1d.weight" in name:
            # for compatibility tensor ssm_conv1d must be (5120, 1, 4) dim,
            # quantized one is (5120, 4)
            weights = np.expand_dims(weights, axis=1)
        if "ssm_a" in name:
            # Original exponential implementation
            # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L2975-L2977
            weights = np.log(-weights)
        return GGUFTensor(weights, name, {})
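The `np.log(-weights)` step can be checked numerically. Based on the linked llama.cpp conversion (stated here as an assumption), the GGUF file stores the Mamba state matrix as `A = -exp(A_log)`, so the processor recovers `A_log`:

```py
import numpy as np

a_log = np.random.rand(4, 8).astype(np.float32)  # what the Transformers checkpoint stores (A_log)
a_gguf = -np.exp(a_log)                          # what the GGUF file stores (assumed parameterization)
recovered = np.log(-a_gguf)                      # the transform applied by MambaTensorProcessor above
assert np.allclose(recovered, a_log, atol=1e-6)
```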
class NemotronTensorProcessor(TensorProcessor):
    def __init__(self, config=None):
        super().__init__(config=config)

    # ref: https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L4666
    def process(self, weights, name, **kwargs):
        if "norm.weight" in name:
            weights = weights - 1
        return GGUFTensor(weights, name, {})
class Gemma2TensorProcessor(TensorProcessor):
    def __init__(self, config=None):
        super().__init__(config=config)

    # ref: https://github.com/ggerganov/llama.cpp/blob/d79d8f39b4da6deca4aea8bf130c6034c482b320/convert_hf_to_gguf.py#L3191
    # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89
    def process(self, weights, name, **kwargs):
        if "norm.weight" in name:
            weights = weights - 1
        return GGUFTensor(weights, name, {})
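Both Nemotron and Gemma2 subtract 1 from the norm weights. The reasoning, based on the `modeling_gemma` reference linked above (stated here as an assumption), is that these models' RMSNorm scales by `(1 + weight)` while the GGUF file stores the combined factor; a tiny numerical check:

```py
import numpy as np

x = np.random.rand(8).astype(np.float32)
w_hf = np.random.rand(8).astype(np.float32)  # weight as stored by Transformers
w_gguf = 1.0 + w_hf                          # combined scale as assumed to be stored in GGUF

assert np.allclose(x * (1.0 + w_hf), x * w_gguf)  # same normalization scaling
assert np.allclose(w_gguf - 1.0, w_hf)            # hence `weights - 1` in the processors above
```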
# source: src/transformers/modeling_flax_outputs.py
class FlaxBaseModelOutput(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    last_hidden_state: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
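A short usage sketch for these output classes (assumes `jax` and `flax` are installed): `ModelOutput` subclasses such as `FlaxBaseModelOutput` can be read by attribute, by key, or converted to a plain tuple of their non-`None` fields:

```py
import jax.numpy as jnp

from transformers.modeling_flax_outputs import FlaxBaseModelOutput

hidden = jnp.zeros((1, 4, 8))  # (batch_size, sequence_length, hidden_size)
out = FlaxBaseModelOutput(last_hidden_state=hidden)

print(out.last_hidden_state.shape)     # attribute access: (1, 4, 8)
print(out["last_hidden_state"].shape)  # dict-style access to the same array
print(len(out.to_tuple()))             # 1 -- only fields that are not None are kept
```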
class FlaxBaseModelOutputWithNoAttention(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
class FlaxBaseModelOutputWithPoolingAndNoAttention(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state after a pooling operation on the spatial dimensions.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """
    last_hidden_state: jnp.ndarray = None
    pooler_output: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
class FlaxImageClassifierOutputWithNoAttention(ModelOutput):
    """
    Base class for outputs of image classification models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states (also called feature maps) of the model at the output of each stage.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
class FlaxBaseModelOutputWithPast(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        past_key_values (`Dict[str, jnp.ndarray]`):
            Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    last_hidden_state: jnp.ndarray = None
    past_key_values: Optional[Dict[str, jnp.ndarray]] = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxBaseModelOutputWithPooling(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.
    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    last_hidden_state: jnp.ndarray = None
    pooler_output: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.
    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
            for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
    """
    last_hidden_state: jnp.ndarray = None
    pooler_output: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
            If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output.
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
    """

    last_hidden_state: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxSeq2SeqModelOutput(ModelOutput):
    """
    Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential decoding.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the decoder of the model.

            If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output.
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
        encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
    """
    last_hidden_state: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxCausalLMOutputWithCrossAttentions(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `jnp.ndarray` tuples of length `config.n_layers`, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if the model is used in an encoder-decoder setting. Only relevant if `config.is_decoder = True`.

            Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
    """
    logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxMaskedLMOutput(ModelOutput):
    """
    Base class for masked language models outputs.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxSeq2SeqLMOutput(ModelOutput):
    """
    Base class for sequence-to-sequence language models outputs.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
        encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxNextSentencePredictorOutput(ModelOutput):
    """
    Base class for outputs of models predicting if two sentences are consecutive or not.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxSequenceClassifierOutput(ModelOutput):
    """
    Base class for outputs of sentence classification models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput):
    """
    Base class for outputs of sequence-to-sequence sentence classification models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
        encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxMultipleChoiceModelOutput(ModelOutput):
    """
    Base class for outputs of multiple choice models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, num_choices)`):
            *num_choices* is the second dimension of the input tensors (see *input_ids* above).

            Classification scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxTokenClassifierOutput(ModelOutput):
    """
    Base class for outputs of token classification models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
207
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
207
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
class FlaxQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering models.

    Args:
        start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.
208
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    start_logits: jnp.ndarray = None
    end_logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
208
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
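A minimal decoding sketch under the same assumptions (hypothetical logits, class imported from `transformers.modeling_flax_outputs`): taking the independent argmax of the start and end scores gives a naive answer span.

```py
import jax.numpy as jnp

from transformers.modeling_flax_outputs import FlaxQuestionAnsweringModelOutput

# Hypothetical scores for batch_size=1 and sequence_length=6.
outputs = FlaxQuestionAnsweringModelOutput(
    start_logits=jnp.array([[0.1, 3.0, 0.2, 0.1, 0.0, 0.0]]),
    end_logits=jnp.array([[0.0, 0.1, 0.2, 2.5, 0.0, 0.0]]),
)

start_index = int(jnp.argmax(outputs.start_logits, axis=-1)[0])  # 1
end_index = int(jnp.argmax(outputs.end_logits, axis=-1)[0])      # 3
answer_token_span = (start_index, end_index)  # token indices of the predicted span
```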
class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of sequence-to-sequence question answering models.

    Args:
        start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
209
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
209
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
            Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
209
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.
209
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """
209
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
    start_logits: jnp.ndarray = None
    end_logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
209
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_flax_outputs.py
class Cache(torch.nn.Module):
    """
    Base, abstract class for all caches. The actual data structure is specific to each subclass.
    """

    def __init__(self):
        super().__init__()

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
210
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
        Parameters:
            key_states (`torch.Tensor`):
                The new key states to cache.
            value_states (`torch.Tensor`):
                The new value states to cache.
            layer_idx (`int`):
                The index of the layer to cache the states for.
            cache_kwargs (`Dict[str, Any]`, *optional*):
                Additional arguments for the cache subclass. These are specific to each subclass and allow new types of
                cache to be created.

        Return:
            A tuple containing the updated key and value states.
        """
        raise NotImplementedError("Make sure to implement `update` in a subclass.")

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        # TODO: deprecate this function in favor of `cache_position`
        raise NotImplementedError("Make sure to implement `get_seq_length` in a subclass.")
210
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
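To make the abstract `update`/`get_seq_length` contract concrete, here is a sketch of a subclass that simply concatenates new states along the sequence dimension. It is loosely modeled on the library's `DynamicCache`; the class name, attribute names, and growth strategy are illustrative assumptions, not the actual implementation.

```py
from typing import Any, Dict, List, Optional, Tuple

import torch

from transformers.cache_utils import Cache


class MinimalDynamicCache(Cache):
    """Toy cache that grows without bound by concatenating new key/value states."""

    def __init__(self):
        super().__init__()
        self.key_cache: List[torch.Tensor] = []
        self.value_cache: List[torch.Tensor] = []

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if len(self.key_cache) <= layer_idx:
            # First call for this layer: store the states as-is.
            self.key_cache.append(key_states)
            self.value_cache.append(value_states)
        else:
            # Later calls: append the new tokens along the sequence dimension (dim=-2).
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        if len(self.key_cache) <= layer_idx:
            return 0
        return self.key_cache[layer_idx].shape[-2]

    def get_max_cache_shape(self) -> Optional[int]:
        return None  # no size limit
```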
    def get_max_cache_shape(self) -> Optional[int]:
        """Returns the maximum sequence length (i.e. max capacity) of the cache object."""
        raise NotImplementedError("Make sure to implement `get_max_cache_shape` in a subclass.")

    def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int] = 0) -> int:
        """Given the sequence length of the new inputs, returns the usable length of the cache."""
        # Cache without size limit -> all cache is usable
        # Cache with size limit -> if the length of the cache plus the length of the new inputs is larger than the
        # maximum cache length, we will need to evict part of the cache (and thus not all cache is usable)
        max_length = self.get_max_cache_shape()
        previous_seq_length = self.get_seq_length(layer_idx)
        if max_length is not None and previous_seq_length + new_seq_length > max_length:
            return max_length - new_seq_length
        return previous_seq_length
210
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
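The eviction arithmetic in `get_usable_length` is easiest to see with numbers; the values below are made up purely for illustration.

```py
# A size-limited cache (max_length=16) that already holds 14 tokens receives 4 new tokens.
max_length, previous_seq_length, new_seq_length = 16, 14, 4

if max_length is not None and previous_seq_length + new_seq_length > max_length:
    usable_length = max_length - new_seq_length  # 16 - 4 = 12 -> the 2 oldest entries get evicted
else:
    usable_length = previous_seq_length

assert usable_length == 12
```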
    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        for layer_idx in range(len(self.key_cache)):
            if self.key_cache[layer_idx] != []:
                device = self.key_cache[layer_idx].device
                self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
            if self.value_cache[layer_idx] != []:
                device = self.value_cache[layer_idx].device
                self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))

    @property
    def seen_tokens(self):
        logger.warning_once(
            "The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` "
            "model input instead."
        )
        if hasattr(self, "_seen_tokens"):
            return self._seen_tokens
        else:
            return None
210
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
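Per layer, `reorder_cache` reduces to an `index_select` along the batch (beam) dimension; a toy illustration with made-up shapes:

```py
import torch

# One layer's key cache: 3 beams, 2 heads, 5 cached tokens, head_dim=4 (shapes are illustrative).
key_cache_layer_0 = torch.randn(3, 2, 5, 4)

# After a beam-search step, say beam 2 was selected twice and beam 0 once.
beam_idx = torch.tensor([2, 2, 0])

reordered = key_cache_layer_0.index_select(0, beam_idx)
assert reordered.shape == key_cache_layer_0.shape
assert torch.equal(reordered[0], key_cache_layer_0[2])
```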
class CacheConfig:
    """
    Base class for cache configs.
    """

    cache_implementation: None

    @classmethod
    def from_dict(cls, config_dict, **kwargs):
        """
        Constructs a CacheConfig instance from a dictionary of parameters.

        Args:
            config_dict (`Dict[str, Any]`): Dictionary containing configuration parameters.
            **kwargs: Additional keyword arguments to override dictionary values.

        Returns:
            `CacheConfig`: Instance of CacheConfig constructed from the dictionary.
        """
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        return config
211
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
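A sketch of `from_dict` in action. `CacheConfig` itself defines no fields, so the example uses a hypothetical dataclass subclass; real subclasses declare their own fields in the same spirit.

```py
from dataclasses import dataclass

from transformers.cache_utils import CacheConfig


@dataclass
class ToyCacheConfig(CacheConfig):
    # Hypothetical fields, for illustration only.
    cache_implementation: str = "toy"
    window_length: int = 256


# Values from the dictionary populate the instance; keyword arguments override matching attributes.
config = ToyCacheConfig.from_dict({"window_length": 128}, window_length=64)
assert config.window_length == 64
assert config.cache_implementation == "toy"
```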
    # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.to_json_file
    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
        Save this instance to a JSON file.

        Args:
            json_file_path (`str` or `os.PathLike`):
                Path to the JSON file in which this configuration instance's parameters will be saved.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)
211
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
    # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.to_dict
    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        return copy.deepcopy(self.__dict__)

    # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__iter__
    def __iter__(self):
        """allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
        for attr, value in copy.deepcopy(self.__dict__).items():
            yield attr, value

    # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__repr__
    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"
211
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/cache_utils.py
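Reusing the hypothetical `ToyCacheConfig` from the sketch above, the serialization helpers round-trip as follows (a sketch, not part of the library):

```py
import json
import tempfile
from pathlib import Path

config = ToyCacheConfig(window_length=512)

# `to_dict` deep-copies the instance attributes; `__iter__` makes `dict(config)` equivalent.
assert config.to_dict() == dict(config)

# `to_json_file` writes the same dictionary as pretty-printed, key-sorted JSON.
with tempfile.TemporaryDirectory() as tmp:
    path = Path(tmp) / "cache_config.json"
    config.to_json_file(path)
    assert json.loads(path.read_text())["window_length"] == 512
```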