text           stringlengths    31 – 243k
type           stringclasses    1 value
start          int64            36 – 275k
end            int64            286 – 280k
depth          int64            0 – 1
filepath       stringlengths    85 – 188
parent_class   stringclasses    3 values
class_index    int64            0 – 10.8k
class Speech2TextProcessor(ProcessorMixin): r""" Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor. [`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and [`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more information. Args: feature_extractor (`Speech2TextFeatureExtractor`): An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Speech2TextTokenizer`): An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "Speech2TextFeatureExtractor" tokenizer_class = "Speech2TextTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor's [`~Speech2TextFeatureExtractor.__call__`] and returns its output. If used in the context [`~Speech2TextProcessor.as_target_processor`] this method forwards all its arguments to Speech2TextTokenizer's [`~Speech2TextTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.") audio = kwargs.pop("raw_speech") else: audio = kwargs.pop("audio", None) sampling_rate = kwargs.pop("sampling_rate", None) text = kwargs.pop("text", None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.feature_extractor self._in_target_context_manager = False
class_definition
757
4,818
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/speech_to_text/processing_speech_to_text.py
null
3,300
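The `Speech2TextProcessor.__call__` in the row above simply routes `audio` to the feature extractor and `text` to the tokenizer, attaching the tokenized ids as `labels` when both are passed. A minimal sketch of that routing logic, using hypothetical stand-in callables (`fake_feature_extractor`, `fake_tokenizer`) rather than the real classes:

```python
# Stand-ins for the real feature extractor / tokenizer, for illustration only.
def fake_feature_extractor(audio, sampling_rate=None):
    return {"input_features": [len(audio)], "sampling_rate": sampling_rate}

def fake_tokenizer(text):
    return {"input_ids": [[1, 2, 3]]}

def process(audio=None, text=None, sampling_rate=None):
    # Same routing as the processor's __call__ above, minus deprecation handling.
    if audio is None and text is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    inputs = fake_feature_extractor(audio, sampling_rate=sampling_rate) if audio is not None else None
    encodings = fake_tokenizer(text) if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs["labels"] = encodings["input_ids"]
    return inputs

print(process(audio=[0.1, 0.2], text="hello", sampling_rate=16_000))
# {'input_features': [2], 'sampling_rate': 16000, 'labels': [[1, 2, 3]]}
```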
class GemmaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Gemma-7B. e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GemmaModel`] hidden_size (`int`, *optional*, defaults to 3072): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 24576): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 28): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 16): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. head_dim (`int`, *optional*, defaults to 256): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The legacy activation function. It is overwritten by the `hidden_activation`. hidden_activation (`str` or `function`, *optional*): The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. eos_token_id (`int`, *optional*, defaults to 1): End of stream token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. 
attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ```python >>> from transformers import GemmaModel, GemmaConfig >>> # Initializing a Gemma gemma-7b style configuration >>> configuration = GemmaConfig() >>> # Initializing a model from the gemma-7b style configuration >>> model = GemmaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "gemma" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=256000, hidden_size=3072, intermediate_size=24576, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act="gelu_pytorch_tanh", hidden_activation=None, max_position_embeddings=8192, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.hidden_activation = hidden_activation self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
class_definition
1,504
7,757
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modular_gemma.py
null
3,301
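Assuming the documented defaults (`hidden_size=3072`, `num_attention_heads=16`, `num_key_value_heads=16`, `head_dim=256`), a quick back-of-the-envelope sketch of the attention projection widths implied by this config; note that `num_heads * head_dim` deliberately differs from `hidden_size` in Gemma:

```python
hidden_size, num_heads, num_kv_heads, head_dim = 3072, 16, 16, 256
q_proj_out = num_heads * head_dim      # 4096 output features, not hidden_size
kv_proj_out = num_kv_heads * head_dim  # 4096 here; shrinks when num_kv_heads < num_heads (GQA/MQA)
print(q_proj_out, kv_proj_out)
```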
class GemmaTokenizer(LlamaTokenizer, PreTrainedTokenizer): """ Construct a Gemma tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is no padding token in the original model. Args: vocab_file (`str`): Path to the vocabulary file. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<pad>"`): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. add_bos_token (`bool`, *optional*, defaults to `True`): Whether or not to add an `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Gemma should be used. spaces_between_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to add spaces between special tokens. 
""" def __init__( self, vocab_file, unk_token="<unk>", bos_token="<bos>", eos_token="<eos>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, add_bos_token=True, add_eos_token=False, clean_up_tokenization_spaces=False, use_default_system_prompt=False, spaces_between_special_tokens=False, **kwargs, ): self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token self.vocab_file = vocab_file self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self.use_default_system_prompt = use_default_system_prompt self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) PreTrainedTokenizer.__init__( self, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, sp_model_kwargs=sp_model_kwargs, clean_up_tokenization_spaces=clean_up_tokenization_spaces, use_default_system_prompt=use_default_system_prompt, spaces_between_special_tokens=spaces_between_special_tokens, **kwargs, ) def get_spm_processor(self): raise AttributeError("Not needed for Gemma") def unk_token_length(self): raise AttributeError("Not needed for Gemma") def tokenize(self, text: "TextInput", **kwargs) -> List[str]: """ Args: text: TextInput Simply calls PreTrainedTokenizer's method """ return PreTrainedTokenizer.tokenize(self, text, **kwargs) def _tokenize(self, text, **kwargs): """ Args: text: TextInput Returns a tokenized string. The Gemma tokenizer never adds a prefix space. """ return self.sp_model.encode(text, out_type=str) def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, spaces_between_special_tokens: bool = False, **kwargs, ) -> str: sub_texts = [] current_sub_text = [] for ids in token_ids: if skip_special_tokens and ids in self.all_special_ids: continue if ids in self._added_tokens_decoder: if current_sub_text: sub_texts.append(self.sp_model.decode(current_sub_text)) sub_texts.append(self._added_tokens_decoder[ids].content) current_sub_text = [] else: current_sub_text.append(ids) if current_sub_text: sub_texts.append(self.sp_model.decode(current_sub_text)) if spaces_between_special_tokens: sub_texts = " ".join(sub_texts) else: sub_texts = "".join(sub_texts) return sub_texts.replace(SPIECE_UNDERLINE, " ") def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self._added_tokens_encoder: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string
class_definition
7,760
14,786
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modular_gemma.py
null
3,302
class GemmaRMSNorm(nn.Module): def __init__(self, dim: int, eps: float = 1e-6): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.zeros(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()) # Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16) # See https://github.com/huggingface/transformers/pull/29402 output = output * (1.0 + self.weight.float()) return output.type_as(x) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.eps}"
class_definition
14,789
15,460
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modular_gemma.py
null
3,303
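A hedged sketch of the `GemmaRMSNorm` forward pass above, written with plain tensor ops to make the ordering explicit: both the normalization and the `(1 + weight)` scale are computed in float32 and only then cast back to the input dtype (the Gemma-vs-Llama difference referenced in the PR link):

```python
import torch

x = torch.randn(2, 4, dtype=torch.float16)
weight = torch.zeros(4)  # initialised to zeros, so the effective scale is (1 + weight) = 1
eps = 1e-6

x32 = x.float()
normed = x32 * torch.rsqrt(x32.pow(2).mean(-1, keepdim=True) + eps)
out = (normed * (1.0 + weight.float())).type_as(x)
print(out.dtype)  # torch.float16
```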
class GemmaMLP(LlamaMLP): def __init__(self, config): super().__init__() self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
class_definition
15,463
15,812
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modular_gemma.py
null
3,304
class GemmaModel(LlamaModel): def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # embed positions hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # normalized # Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5 # See https://github.com/huggingface/transformers/pull/29402 normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype) hidden_states = hidden_states * normalizer # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) output = BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) return output if return_dict 
else output.to_tuple()
class_definition
15,815
20,493
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modular_gemma.py
null
3,305
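The forward pass above scales the token embeddings by `sqrt(hidden_size)` after casting the normalizer to the model dtype (the rounding effect mentioned in the inline comment). A short sketch of just that step, assuming a bfloat16 compute dtype purely for illustration:

```python
import torch

hidden_size = 3072
inputs_embeds = torch.randn(1, 5, hidden_size, dtype=torch.bfloat16)
# Normalizer is materialised in the embedding dtype before the multiply.
normalizer = torch.tensor(hidden_size ** 0.5, dtype=inputs_embeds.dtype)
hidden_states = inputs_embeds * normalizer
print(hidden_states.shape, float(normalizer))
```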
class GemmaForCausalLM(LlamaForCausalLM): def forward(**super_kwargs): r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. num_logits_to_keep (`int`, *optional*): Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. Returns: Example: ```python >>> from transformers import AutoTokenizer, GemmaForCausalLM >>> model = GemmaForCausalLM.from_pretrained("google/gemma-7b") >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") >>> prompt = "What is your favorite condiment?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "What is your favorite condiment?" ```""" return super().forward(**super_kwargs)
class_definition
20,496
22,145
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modular_gemma.py
null
3,306
class GemmaForSequenceClassification(LlamaForSequenceClassification): pass
class_definition
22,148
22,226
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modular_gemma.py
null
3,307
class GemmaForTokenClassification(LlamaForTokenClassification): pass
class_definition
22,229
22,301
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modular_gemma.py
null
3,308
class GemmaTokenizer(PreTrainedTokenizer): """ Construct a Gemma tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is no padding token in the original model. Args: vocab_file (`str`): Path to the vocabulary file. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<pad>"`): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. add_bos_token (`bool`, *optional*, defaults to `True`): Whether or not to add an `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Gemma should be used. spaces_between_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to add spaces between special tokens. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<unk>", bos_token="<bos>", eos_token="<eos>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, add_bos_token=True, add_eos_token=False, clean_up_tokenization_spaces=False, use_default_system_prompt=False, spaces_between_special_tokens=False, **kwargs, ): self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token self.vocab_file = vocab_file self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self.use_default_system_prompt = use_default_system_prompt self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, sp_model_kwargs=sp_model_kwargs, clean_up_tokenization_spaces=clean_up_tokenization_spaces, use_default_system_prompt=use_default_system_prompt, spaces_between_special_tokens=spaces_between_special_tokens, **kwargs, ) def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__.update(d) self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def vocab_size(self): """Returns vocab size""" return self.sp_model.get_piece_size() def get_vocab(self): """Returns vocab as a dict""" vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def tokenize(self, text: "TextInput", **kwargs) -> List[str]: """ Args: text: TextInput Simply calls PreTrainedTokenizer's method """ return super().tokenize(text, **kwargs) def _tokenize(self, text, **kwargs): """ Args: text: TextInput Returns a tokenized string. The Gemma tokenizer never adds a prefix space. """ return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self._added_tokens_encoder: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. 
Returns: `Tuple(str)`: Paths to the files saved. """ if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id return ( bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id ) def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
""" bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) if token_ids_1 is not None: output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, spaces_between_special_tokens: bool = False, **kwargs, ) -> str: sub_texts = [] current_sub_text = [] for ids in token_ids: if skip_special_tokens and ids in self.all_special_ids: continue if ids in self._added_tokens_decoder: if current_sub_text: sub_texts.append(self.sp_model.decode(current_sub_text)) sub_texts.append(self._added_tokens_decoder[ids].content) current_sub_text = [] else: current_sub_text.append(ids) if current_sub_text: sub_texts.append(self.sp_model.decode(current_sub_text)) if spaces_between_special_tokens: sub_texts = " ".join(sub_texts) else: sub_texts = "".join(sub_texts) return sub_texts.replace(SPIECE_UNDERLINE, " ")
class_definition
1,875
14,134
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/tokenization_gemma.py
null
3,309
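`build_inputs_with_special_tokens` in the slow tokenizer above only prepends or appends the BOS/EOS ids depending on `add_bos_token` / `add_eos_token`. A standalone sketch with hypothetical token ids (2 for BOS, 1 for EOS, mirroring the config defaults elsewhere in this dump):

```python
bos_id, eos_id = 2, 1          # hypothetical ids for illustration
add_bos, add_eos = True, False  # the documented defaults

def build_inputs(token_ids_0, token_ids_1=None):
    bos = [bos_id] if add_bos else []
    eos = [eos_id] if add_eos else []
    out = bos + token_ids_0 + eos
    if token_ids_1 is not None:
        out = out + bos + token_ids_1 + eos
    return out

print(build_inputs([10, 11, 12]))        # [2, 10, 11, 12]
print(build_inputs([10, 11], [20, 21]))  # [2, 10, 11, 2, 20, 21]
```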
class GemmaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Gemma-7B. e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GemmaModel`] hidden_size (`int`, *optional*, defaults to 3072): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 24576): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 28): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 16): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. head_dim (`int`, *optional*, defaults to 256): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The legacy activation function. It is overwritten by the `hidden_activation`. hidden_activation (`str` or `function`, *optional*): The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. eos_token_id (`int`, *optional*, defaults to 1): End of stream token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. 
attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ```python >>> from transformers import GemmaModel, GemmaConfig >>> # Initializing a Gemma gemma-7b style configuration >>> configuration = GemmaConfig() >>> # Initializing a model from the gemma-7b style configuration >>> model = GemmaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "gemma" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=256000, hidden_size=3072, intermediate_size=24576, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act="gelu_pytorch_tanh", hidden_activation=None, max_position_embeddings=8192, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.hidden_activation = hidden_activation self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
class_definition
1,505
7,758
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/configuration_gemma.py
null
3,310
class GemmaTokenizerFast(PreTrainedTokenizerFast): """ Construct a Gemma tokenizer fast. Based on byte-level Byte-Pair-Encoding. This uses notably ByteFallback and no prefix space. Normalization is applied to replace `" "` with `"▁"` ```python >>> from transformers import GemmaTokenizerFast >>> tokenizer = GemmaTokenizerFast.from_pretrained("hf-internal-testing/dummy-gemma") >>> tokenizer.encode("Hello this is a test") [2, 4521, 736, 603, 476, 2121] ``` If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the values of the first token and final token of an encoded sequence will not be correct). For more details, checkout [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that contains the vocabulary necessary to instantiate a tokenizer. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The padding token add_bos_token (`bool`, *optional*, defaults to `True`): Whether or not to add an `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. 
""" vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = GemmaTokenizer padding_side = "left" model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token="<unk>", bos_token="<bos>", eos_token="<eos>", pad_token="<pad>", add_bos_token=True, add_eos_token=False, **kwargs, ): super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, **kwargs, ) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor def update_post_processor(self): """ Updates the underlying post processor with the current `bos_token` and `eos_token`. """ bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError("add_bos_token = True but bos_token = None") eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError("add_eos_token = True but eos_token = None") single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}" pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing( single=single, pair=pair, special_tokens=special_tokens ) @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output
class_definition
1,168
8,278
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/tokenization_gemma_fast.py
null
3,311
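`update_post_processor` in the fast tokenizer above builds `TemplateProcessing` strings from the current BOS/EOS settings. A sketch of just the string construction, assuming the default `add_bos_token=True`, `add_eos_token=False`:

```python
bos, eos = "<bos>", "<eos>"
add_bos_token, add_eos_token = True, False

# Same f-string construction as update_post_processor above.
single = f"{(bos + ':0 ') if add_bos_token else ''}$A:0{(' ' + eos + ':0') if add_eos_token else ''}"
pair = f"{single}{(' ' + bos + ':1') if add_bos_token else ''} $B:1{(' ' + eos + ':1') if add_eos_token else ''}"

print(single)  # <bos>:0 $A:0
print(pair)    # <bos>:0 $A:0 <bos>:1 $B:1
```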
class FlaxGemmaRMSNorm(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.epsilon = self.config.rms_norm_eps self.weight = self.param("weight", lambda _, shape: jnp.ones(shape), self.config.hidden_size) def __call__(self, hidden_states): variance = jnp.asarray(hidden_states, dtype=jnp.float32) variance = jnp.power(variance, 2) variance = variance.mean(-1, keepdims=True) # use `jax.numpy.sqrt` as `jax.lax.rsqrt` does not match `torch.rsqrt` hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon) return (1 + self.weight) * jnp.asarray(hidden_states, dtype=self.dtype)
class_definition
7,245
7,943
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,312
class FlaxGemmaRotaryEmbedding(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 # Ignore copy def setup(self): head_dim = self.config.head_dim self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim) def __call__(self, key, query, position_ids): sincos = self.sincos[position_ids] sin_pos, cos_pos = jnp.split(sincos, 2, axis=-1) key = apply_rotary_pos_emb(key, sin_pos, cos_pos) query = apply_rotary_pos_emb(query, sin_pos, cos_pos) key = jnp.asarray(key, dtype=self.dtype) query = jnp.asarray(query, dtype=self.dtype) return key, query
class_definition
8,049
8,729
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,313
class FlaxGemmaAttention(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 causal: bool = True is_cross_attention: bool = False def setup(self): config = self.config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.attention_softmax_in_fp32 = self.dtype is not jnp.float32 self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads kernel = jax.nn.initializers.normal(self.config.initializer_range) self.q_proj = nn.Dense( self.num_heads * self.head_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel ) self.k_proj = nn.Dense( self.num_key_value_heads * self.head_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel, ) self.v_proj = nn.Dense( self.num_key_value_heads * self.head_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel, ) self.o_proj = nn.Dense(self.embed_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel) self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool") self.rotary_emb = FlaxGemmaRotaryEmbedding(config, dtype=self.dtype) def _split_heads(self, hidden_states, num_heads): return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads * self.head_dim,)) @nn.compact # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoSelfAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. 
pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, position_ids, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_heads) key = self._split_heads(key, self.num_key_value_heads) value = self._split_heads(value, self.num_key_value_heads) key, query = self.rotary_emb(key, query, position_ids) query_length, key_length = query.shape[1], key.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] batch_size = hidden_states.shape[0] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) dropout_rng = None if not deterministic and self.config.attention_dropout > 0.0: dropout_rng = self.make_rng("dropout") # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.has_variable("cache", "cached_key") or init_cache: key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) # transform boolean mask into float mask attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) key = jnp.repeat(key, repeats=self.num_key_value_groups, axis=2) value = jnp.repeat(value, repeats=self.num_key_value_groups, axis=2) # usual dot product attention attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype attn_weights = dot_product_attention_weights( query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_dropout, deterministic=deterministic, dtype=attention_dtype, ) if self.attention_softmax_in_fp32: attn_weights = attn_weights.astype(self.dtype) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.o_proj(attn_output) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs
class_definition
8,732
15,846
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,314
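In `FlaxGemmaAttention.__call__` above, grouped-query attention is implemented by repeating each key/value head `num_key_value_groups` times along the head axis before the usual dot-product attention. A shape-only sketch with made-up dimensions:

```python
import numpy as np

batch, seq, num_heads, num_kv_heads, head_dim = 1, 4, 8, 2, 16
key = np.zeros((batch, seq, num_kv_heads, head_dim))
# Each KV head is repeated so that every query head has a matching key head.
key = np.repeat(key, repeats=num_heads // num_kv_heads, axis=2)
print(key.shape)  # (1, 4, 8, 16)
```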
class FlaxGemmaMLP(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim kernel_init = jax.nn.initializers.normal(self.config.initializer_range) if self.config.hidden_activation is None: logger.warning_once( "Gemma's activation function should be approximate GeLU and not exact GeLU. " "Changing the activation function to `gelu_pytorch_tanh`." f"if you want to use the legacy `{self.config.hidden_act}`, " f"edit the `model.config` to set `hidden_activation={self.config.hidden_act}` " " instead of `hidden_act`. See https://github.com/huggingface/transformers/pull/29402 for more details." ) hidden_activation = "gelu_pytorch_tanh" else: hidden_activation = self.config.hidden_activation self.act = ACT2FN[hidden_activation] self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) self.up_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) def __call__(self, hidden_states): up_proj_states = self.up_proj(hidden_states) gate_states = self.act(self.gate_proj(hidden_states)) hidden_states = self.down_proj(up_proj_states * gate_states) return hidden_states
class_definition
15,849
17,499
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,315
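`FlaxGemmaMLP` above computes `down_proj(up_proj(x) * act(gate_proj(x)))` with the tanh-approximated GELU. A numpy sketch of that gated computation, using random stand-in weight matrices in place of the `nn.Dense` layers:

```python
import numpy as np

def gelu_tanh(x):
    # The "gelu_pytorch_tanh" approximation of GELU.
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))

hidden, inner = 8, 16
rng = np.random.default_rng(0)
w_gate = rng.normal(size=(hidden, inner))
w_up = rng.normal(size=(hidden, inner))
w_down = rng.normal(size=(inner, hidden))

x = rng.normal(size=(2, hidden))
out = (x @ w_up * gelu_tanh(x @ w_gate)) @ w_down
print(out.shape)  # (2, 8)
```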
class FlaxGemmaDecoderLayer(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.input_layernorm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype) self.self_attn = FlaxGemmaAttention(self.config, dtype=self.dtype) self.post_attention_layernorm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype) self.mlp = FlaxGemmaMLP(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, position_ids=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): residual = hidden_states hidden_states = self.input_layernorm(hidden_states) outputs = self.self_attn( hidden_states, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) # residual connection attn_output = outputs[0] hidden_states = residual + attn_output residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) # residual connection hidden_states = residual + hidden_states return (hidden_states,) + outputs[1:]
class_definition
17,602
19,015
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,316
class FlaxGemmaPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GemmaConfig base_model_prefix = "model" module_class: nn.Module = None def __init__( self, config: GemmaConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. """ # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) def __call__( self, input_ids, attention_mask=None, position_ids=None, params: dict = None, past_key_values: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_ids.shape if position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGemmaAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(position_ids, dtype="i4"), not train, False, output_attentions, output_hidden_states, return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] return outputs
class_definition
19,163
24,476
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,317
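A quick illustration of the position-id broadcast that `init_weights` and `init_cache` above both rely on; the toy input shape `(2, 4)` is made up for this sketch, and only `jax.numpy` is needed.

```python
import jax.numpy as jnp

# Positions 0..seq_len-1 broadcast across the batch, as in `init_weights`/`init_cache`
# of FlaxGemmaPreTrainedModel (toy input shape (2, 4)).
input_shape = (2, 4)
input_ids = jnp.zeros(input_shape, dtype="i4")
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
print(position_ids)
# [[0 1 2 3]
#  [0 1 2 3]]
```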
class FlaxGemmaLayerCollection(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.blocks = [ FlaxGemmaDecoderLayer(self.config, dtype=self.dtype, name=str(i)) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask=None, position_ids=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for block in self.blocks: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = block( hidden_states, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) # this contains possible `None` values - `FlaxGemmaModule` will filter them out outputs = (hidden_states, all_hidden_states, all_attentions) return outputs
class_definition
24,582
26,057
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,318
class FlaxGemmaModule(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.hidden_size = self.config.hidden_size embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range) self.embed_tokens = nn.Embed( self.config.vocab_size, self.hidden_size, embedding_init=embedding_init, dtype=self.dtype, ) self.layers = FlaxGemmaLayerCollection(self.config, dtype=self.dtype) self.norm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype) # Ignore copy def __call__( self, input_ids, attention_mask=None, position_ids=None, deterministic=True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): input_embeds = self.embed_tokens(input_ids.astype("i4")) input_embeds = input_embeds * (self.config.hidden_size**0.5) outputs = self.layers( input_embeds, position_ids=position_ids, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states = outputs[1] + (hidden_states,) outputs = (hidden_states, all_hidden_states) + outputs[2:] else: outputs = (hidden_states,) + outputs[1:] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1], )
class_definition
26,154
28,134
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,319
class FlaxGemmaModel(FlaxGemmaPreTrainedModel):
    module_class = FlaxGemmaModule
class_definition
28,385
28,467
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,320
class FlaxGemmaForCausalLMModule(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.model = FlaxGemmaModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) # Ignore copy def __call__( self, input_ids, attention_mask=None, position_ids=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): outputs = self.model( input_ids, position_ids=position_ids, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.model.variables["params"]["embed_tokens"]["embedding"].T lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_definition
28,746
30,379
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,321
class FlaxGemmaForCausalLM(FlaxGemmaPreTrainedModel): module_class = FlaxGemmaForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since Gemma uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs
class_definition
30,631
32,156
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_flax_gemma.py
null
3,322
class GemmaRMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float())
        # Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16)
        # See https://github.com/huggingface/transformers/pull/29402
        output = output * (1.0 + self.weight.float())
        return output.type_as(x)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"
class_definition
2,479
3,150
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,323
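A standalone numeric sketch of the normalization above (not an import from the library); the hidden size of 4 and the half-precision input are arbitrary choices for illustration.

```python
import torch

# Free-function copy of the GemmaRMSNorm math, for a hypothetical hidden size of 4.
def gemma_rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    output = x.float() * torch.rsqrt(x.float().pow(2).mean(-1, keepdim=True) + eps)
    # Gemma multiplies by (1 + weight) *before* casting back, unlike Llama which
    # casts first and stores the full scale in `weight`.
    return (output * (1.0 + weight.float())).type_as(x)

x = torch.randn(2, 4).half()
weight = torch.zeros(4)  # matches the zero init in `GemmaRMSNorm.__init__`
y = gemma_rms_norm(x, weight)
print(y.dtype, y.shape)  # torch.float16 torch.Size([2, 4])
```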
class GemmaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj
class_definition
3,153
3,821
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,324
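A functional restatement of the gated-MLP computation above; the sizes are made up, and GELU stands in for whatever `ACT2FN[config.hidden_act]` resolves to.

```python
import torch
import torch.nn.functional as F

# Gated MLP sketch: down_proj(act(gate_proj(x)) * up_proj(x)) with toy sizes.
hidden_size, intermediate_size = 8, 32
x = torch.randn(2, 5, hidden_size)

gate = torch.nn.Linear(hidden_size, intermediate_size, bias=False)
up = torch.nn.Linear(hidden_size, intermediate_size, bias=False)
down = torch.nn.Linear(intermediate_size, hidden_size, bias=False)

# The gate branch goes through the activation and multiplies the up branch
# elementwise before the down projection maps back to the hidden size.
out = down(F.gelu(gate(x)) * up(x))
print(out.shape)  # torch.Size([2, 5, 8])
```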
class GemmaRotaryEmbedding(nn.Module): def __init__(self, config: GemmaConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) """ seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: # growth inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len) self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset # This .to() is needed if the model has been moved to a device after being initialized (because # the buffer is automatically moved, but not the original copy) self.original_inv_freq = self.original_inv_freq.to(device) self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class_definition
3,824
7,019
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,325
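A minimal sketch of the cos/sin construction in `forward` above, assuming the standard inverse-frequency table `1 / base**(2i/dim)` that the default rope initializer produces; `dim`, `base`, and the sequence length are toy values.

```python
import torch

# Assumed "default" rope frequencies: 1 / base**(2i/dim).
dim, base = 8, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))    # (dim/2,)

position_ids = torch.arange(6)[None, :]                                # (1, 6)
inv_freq_expanded = inv_freq[None, :, None].float()                    # (1, dim/2, 1)
position_ids_expanded = position_ids[:, None, :].float()               # (1, 1, 6)

# Same outer-product-then-duplicate pattern as GemmaRotaryEmbedding.forward.
freqs = (inv_freq_expanded @ position_ids_expanded).transpose(1, 2)    # (1, 6, dim/2)
emb = torch.cat((freqs, freqs), dim=-1)                                # (1, 6, dim)
cos, sin = emb.cos(), emb.sin()
print(cos.shape, sin.shape)  # torch.Size([1, 6, 8]) torch.Size([1, 6, 8])
```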
class GemmaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: GemmaConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
class_definition
10,298
13,865
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,326
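A hedged sketch of the grouped-query shape bookkeeping implied by `num_key_value_groups` above: fewer key/value heads than query heads, expanded so each query head sees a matching key/value head. The sizes are invented, and `repeat_interleave` plus `scaled_dot_product_attention` stand in for the library's own attention interface.

```python
import torch
import torch.nn.functional as F

batch, seq, head_dim = 1, 5, 16
num_attention_heads, num_key_value_heads = 8, 2
groups = num_attention_heads // num_key_value_heads  # 4 query heads per kv head

q = torch.randn(batch, num_attention_heads, seq, head_dim)
k = torch.randn(batch, num_key_value_heads, seq, head_dim)
v = torch.randn(batch, num_key_value_heads, seq, head_dim)

# Expand kv heads so every query head has a matching key/value head.
k = k.repeat_interleave(groups, dim=1)
v = v.repeat_interleave(groups, dim=1)

attn = F.scaled_dot_product_attention(q, k, v, is_causal=True)
print(attn.shape)  # torch.Size([1, 8, 5, 16])
```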
class GemmaDecoderLayer(nn.Module): def __init__(self, config: GemmaConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = GemmaAttention(config=config, layer_idx=layer_idx) self.mlp = GemmaMLP(config) self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
class_definition
13,868
15,938
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,327
class GemmaPreTrainedModel(PreTrainedModel): config_class = GemmaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["GemmaDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn_2 = True _supports_sdpa = True _supports_flex_attn = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
class_definition
16,960
17,883
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,328
class GemmaModel(GemmaPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`GemmaDecoderLayer`] Args: config: GemmaConfig """ def __init__(self, config: GemmaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = GemmaRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # embed positions hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # normalized # Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5 # See https://github.com/huggingface/transformers/pull/29402 normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype) hidden_states = hidden_states * normalizer # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) output = BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) return output if return_dict else output.to_tuple() def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type == "cuda" and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to plcae the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask
class_definition
22,687
34,193
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,329
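A toy reconstruction of the 4D mask assembled by `_prepare_4d_causal_attention_mask_with_cache_position` above, using a two-row batch (one left-padded row) and float32 for readability; the numbers are made up, but the tensor operations follow the method.

```python
import torch

attention_mask = torch.tensor([[0, 1, 1], [1, 1, 1]])        # (batch, key_len), row 0 left-padded
sequence_length = target_length = 3
dtype = torch.float32
cache_position = torch.arange(sequence_length)
min_dtype = torch.finfo(dtype).min

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)             # mask future positions
causal_mask = causal_mask * (torch.arange(target_length) > cache_position.reshape(-1, 1))
causal_mask = causal_mask[None, None, :, :].expand(2, 1, -1, -1).clone()

# Fold the 2D padding mask into the 4D causal mask.
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
    padding_mask == 0, min_dtype
)
print((causal_mask[0, 0] == min_dtype).int())  # padded key 0 is masked for every query position
```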
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
class_definition
34,196
34,258
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,330
class GemmaForCausalLM(GemmaPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} def __init__(self, config): super().__init__(config) self.model = GemmaModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, num_logits_to_keep: int = 0, **kwargs: Unpack[KwargsForCausalLM], ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. num_logits_to_keep (`int`, *optional*): Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. Returns: Example: ```python >>> from transformers import AutoTokenizer, GemmaForCausalLM >>> model = GemmaForCausalLM.from_pretrained("google/gemma-7b") >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") >>> prompt = "What is your favorite condiment?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "What is your favorite condiment?" 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
34,261
39,305
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,331
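A small illustration of the `num_logits_to_keep` slicing applied right before the LM head above; the hidden size, vocabulary size, and sequence length are made up.

```python
import torch

hidden_states = torch.randn(2, 7, 16)           # (batch, seq_len, hidden_size), toy sizes
lm_head = torch.nn.Linear(16, 32, bias=False)   # hypothetical vocab_size=32

num_logits_to_keep = 1
logits = lm_head(hidden_states[:, -num_logits_to_keep:, :])
print(logits.shape)  # torch.Size([2, 1, 32]) -- only the last position's logits

# num_logits_to_keep=0 is the special case that keeps every position (-0 == 0).
print(lm_head(hidden_states[:, -0:, :]).shape)  # torch.Size([2, 7, 32])
```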
class GemmaForSequenceClassification(GemmaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = GemmaModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
class_definition
40,098
43,910
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,332
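The last-non-padding-token pooling used above, reproduced on toy ids; the pad token id and label count are invented for this sketch.

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 8, 3, 0, 0],
                          [7, 2, 9, 4, 6]])          # row 0 is right-padded

# argmax of the equality mask finds the *first* pad token; subtracting 1 gives the
# last real token. The modulo keeps the index valid when a row has no padding.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4])

logits = torch.randn(2, 5, 3)                        # (batch, seq_len, num_labels)
pooled = logits[torch.arange(2), sequence_lengths]
print(pooled.shape)                                  # torch.Size([2, 3])
```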
class GemmaForTokenClassification(GemmaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = GemmaModel(config) if getattr(config, "classifier_dropout", None) is not None: classifier_dropout = config.classifier_dropout elif getattr(config, "hidden_dropout", None) is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.score = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.score(sequence_output) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.config) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
44,157
47,369
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gemma/modeling_gemma.py
null
3,333
class TimeSeriesTransformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TimeSeriesTransformerModel`]. It is used to instantiate a Time Series Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Time Series Transformer [huggingface/time-series-transformer-tourism-monthly](https://huggingface.co/huggingface/time-series-transformer-tourism-monthly) architecture. Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: prediction_length (`int`): The prediction length for the decoder. In other words, the prediction horizon of the model. This value is typically dictated by the dataset and we recommend to set it appropriately. context_length (`int`, *optional*, defaults to `prediction_length`): The context length for the encoder. If `None`, the context length will be the same as the `prediction_length`. distribution_output (`string`, *optional*, defaults to `"student_t"`): The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial". loss (`string`, *optional*, defaults to `"nll"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood (nll) - which currently is the only supported one. input_size (`int`, *optional*, defaults to 1): The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of multivariate targets. scaling (`string` or `bool`, *optional* defaults to `"mean"`): Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the scaler is set to "mean". lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`): The lags of the input time series as covariates often dictated by the frequency of the data. Default is `[1, 2, 3, 4, 5, 6, 7]` but we recommend to change it based on the dataset appropriately. num_time_features (`int`, *optional*, defaults to 0): The number of time features in the input time series. num_dynamic_real_features (`int`, *optional*, defaults to 0): The number of dynamic real valued features. num_static_categorical_features (`int`, *optional*, defaults to 0): The number of static categorical features. num_static_real_features (`int`, *optional*, defaults to 0): The number of static real valued features. cardinality (`list[int]`, *optional*): The cardinality (number of different values) for each of the static categorical features. Should be a list of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if `num_static_categorical_features` is > 0. embedding_dimension (`list[int]`, *optional*): The dimension of the embedding for each of the static categorical features. Should be a list of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if `num_static_categorical_features` is > 0. d_model (`int`, *optional*, defaults to 64): Dimensionality of the transformer layers. encoder_layers (`int`, *optional*, defaults to 2): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 2): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 2): Number of attention heads for each attention layer in the Transformer encoder. 
decoder_attention_heads (`int`, *optional*, defaults to 2): Number of attention heads for each attention layer in the Transformer decoder. encoder_ffn_dim (`int`, *optional*, defaults to 32): Dimension of the "intermediate" (often named feed-forward) layer in encoder. decoder_ffn_dim (`int`, *optional*, defaults to 32): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and `"relu"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the encoder, and decoder. encoder_layerdrop (`float`, *optional*, defaults to 0.1): The dropout probability for the attention and fully connected layers for each encoder layer. decoder_layerdrop (`float`, *optional*, defaults to 0.1): The dropout probability for the attention and fully connected layers for each decoder layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout probability used between the two layers of the feed-forward networks. num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples to generate in parallel for each time step of inference. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. use_cache (`bool`, *optional*, defaults to `True`): Whether to use the past key/values attentions (if applicable to the model) to speed up decoding. Example: ```python >>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel >>> # Initializing a Time Series Transformer configuration with 12 time steps for prediction >>> configuration = TimeSeriesTransformerConfig(prediction_length=12) >>> # Randomly initializing a model (with random weights) from the configuration >>> model = TimeSeriesTransformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "time_series_transformer" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", d_model: int = 64, dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, **kwargs, ): # time series specific configuration self.prediction_length = prediction_length self.context_length = context_length or prediction_length self.distribution_output = 
distribution_output self.loss = loss self.input_size = input_size self.num_time_features = num_time_features self.lags_sequence = lags_sequence self.scaling = scaling self.num_dynamic_real_features = num_dynamic_real_features self.num_static_real_features = num_static_real_features self.num_static_categorical_features = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(cardinality) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) self.cardinality = cardinality else: self.cardinality = [0] if embedding_dimension and num_static_categorical_features > 0: if len(embedding_dimension) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) self.embedding_dimension = embedding_dimension else: self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality] self.num_parallel_samples = num_parallel_samples # Transformer architecture configuration self.feature_size = input_size * len(lags_sequence) + self._number_of_features self.d_model = d_model self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.encoder_ffn_dim = encoder_ffn_dim self.decoder_ffn_dim = decoder_ffn_dim self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.activation_function = activation_function self.init_std = init_std self.use_cache = use_cache super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def _number_of_features(self) -> int: return ( sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
class_definition
842
11,656
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py
null
3,334
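Assuming a recent `transformers` install, the snippet below shows how `feature_size` is derived in `__init__` above: `input_size * len(lags_sequence)` plus `_number_of_features`, which here reduces to the two implicit loc/scale features because no categorical, real, or time features are configured.

```python
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=12, input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7]
)
# 1 * 7 lagged values + (0 embeddings + 0 dynamic + 0 time + 0 static real + 1 * 2 loc/scale)
print(config.feature_size)  # 9
```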
class TimeSeriesFeatureEmbedder(nn.Module): """ Embed a sequence of categorical features. Args: cardinalities (`list[int]`): List of cardinalities of the categorical features. embedding_dims (`list[int]`): List of embedding dimensions of the categorical features. """ def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None: super().__init__() self.num_features = len(cardinalities) self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)]) def forward(self, features: torch.Tensor) -> torch.Tensor: if self.num_features > 1: # we slice the last dimension, giving an array of length # self.num_features with shape (N,T) or (N) cat_feature_slices = torch.chunk(features, self.num_features, dim=-1) else: cat_feature_slices = [features] return torch.cat( [ embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices) ], dim=-1, )
class_definition
1,616
2,799
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,335
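A usage sketch for the embedder above (it assumes the `TimeSeriesFeatureEmbedder` class definition is in scope); the cardinalities, embedding sizes, and feature values are made up.

```python
import torch

# Two hypothetical categorical features with cardinalities 10 and 4, embedded into
# 3 and 2 dimensions respectively.
embedder = TimeSeriesFeatureEmbedder(cardinalities=[10, 4], embedding_dims=[3, 2])

features = torch.tensor([[[1, 3], [2, 0]]])   # (batch=1, time=2, num_features=2)
out = embedder(features)
print(out.shape)  # torch.Size([1, 2, 5]) -- the per-feature embeddings are concatenated
```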
class TimeSeriesStdScaler(nn.Module):
    """
    Standardizes features by computing the mean and scale (standard deviation) along the first dimension, then
    normalizes the data by subtracting the mean and dividing by the scale.
    """

    def __init__(self, config: TimeSeriesTransformerConfig):
        super().__init__()
        self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
        self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
        self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5

    def forward(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Parameters:
            data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
                input for Batch norm calculation
            observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
                Calculating the scale on the observed indicator.
        Returns:
            tuple of `torch.Tensor` of shapes
                (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
                `(batch_size, 1, num_input_channels)`)
        """
        denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
        denominator = denominator.clamp_min(1.0)
        loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator

        variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
        scale = torch.sqrt(variance + self.minimum_scale)
        return (data - loc) / scale, loc, scale
class_definition
2,802
4,555
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,336
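A hand-rolled version of the masked mean/scale computation above, assuming the default `dim=1`, `keepdim=True`, and `minimum_scale=1e-5`; the data and the missing-value mask are toy values.

```python
import torch

data = torch.tensor([[[1.0], [2.0], [3.0], [100.0]]])        # (batch, time, channels)
observed = torch.tensor([[[1.0], [1.0], [1.0], [0.0]]])      # last step is missing

denom = observed.sum(1, keepdim=True).clamp_min(1.0)
loc = (data * observed).sum(1, keepdim=True) / denom          # mean over observed steps only
var = (((data - loc) * observed) ** 2).sum(1, keepdim=True) / denom
scale = torch.sqrt(var + 1e-5)
print(loc.squeeze(), scale.squeeze())  # tensor(2.) tensor(0.8165) -- the masked step is ignored
```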
class TimeSeriesMeanScaler(nn.Module): """ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. """ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 self.default_scale = config.default_scale if hasattr(config, "default_scale") else None def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) # If `default_scale` is provided, we use it, otherwise we use the scale # of the batch. if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) # apply default scale where there are no observations scale = torch.where(num_observed > 0, scale, default_scale) # ensure the scale is at least `self.minimum_scale` scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return scaled_data, torch.zeros_like(scale), scale
class_definition
4,558
6,972
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,337
class TimeSeriesNOPScaler(nn.Module): """ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. """ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale
class_definition
6,975
8,189
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,338
class TimeSeriesSinusoidalPositionalEmbedding(nn.Embedding): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None: super().__init__(num_positions, embedding_dim) self.weight = self._init_weight(self.weight) @staticmethod def _init_weight(out: nn.Parameter) -> nn.Parameter: """ Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in the 2nd half of the vector. [dim // 2:] """ n_pos, dim = out.shape position_enc = np.array( [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)] ) out.requires_grad = False # set early to avoid an error in pytorch-1.8+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1 out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() return out @torch.no_grad() def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor: """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions)
class_definition
9,664
11,235
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,339
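A toy rendering of the non-interleaved sinusoidal table built by `_init_weight` above: sin features fill the first half of each vector and cos features the second half (`n_pos=4`, `dim=6` are arbitrary).

```python
import numpy as np
import torch

n_pos, dim = 4, 6
position_enc = np.array(
    [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
weight = torch.zeros(n_pos, dim)
sentinel = dim // 2
weight[:, :sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
weight[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
print(weight[1])  # position 1: three sin features followed by three cos features
```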
class TimeSeriesValueEmbedding(nn.Module):
    def __init__(self, feature_size, d_model):
        super().__init__()
        self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)

    def forward(self, x):
        return self.value_projection(x)
class_definition
11,238
11,523
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,340
class TimeSeriesTransformerAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[TimeSeriesTransformerConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value
class_definition
11,626
19,050
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,341
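The attention record above does its head bookkeeping with `view`/`transpose` (the `_shape` helper) and batched matrix products over a `(bsz * num_heads, seq_len, head_dim)` layout. The sketch below reproduces only that tensor bookkeeping with made-up sizes; masking, dropout, the projections and the key/value cache are omitted:

```python
import torch

bsz, seq_len, embed_dim, num_heads = 2, 5, 16, 4
head_dim = embed_dim // num_heads
scaling = head_dim**-0.5

hidden = torch.randn(bsz, seq_len, embed_dim)

def shape(tensor):
    # mirrors `_shape`: (bsz, seq, embed_dim) -> (bsz, num_heads, seq, head_dim)
    return tensor.view(bsz, -1, num_heads, head_dim).transpose(1, 2).contiguous()

query = shape(hidden * scaling)
key = shape(hidden)
value = shape(hidden)

proj_shape = (bsz * num_heads, -1, head_dim)
attn_weights = torch.bmm(query.view(*proj_shape), key.reshape(*proj_shape).transpose(1, 2))
attn_probs = attn_weights.softmax(dim=-1)                        # (bsz*heads, seq, seq)
attn_output = torch.bmm(attn_probs, value.reshape(*proj_shape))  # (bsz*heads, seq, head_dim)

# undo the head split: (bsz*heads, seq, head_dim) -> (bsz, seq, embed_dim)
attn_output = attn_output.view(bsz, num_heads, seq_len, head_dim).transpose(1, 2).reshape(bsz, seq_len, embed_dim)
print(attn_output.shape)  # torch.Size([2, 5, 16])
```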
class TimeSeriesTransformerEncoderLayer(nn.Module): def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class_definition
19,187
22,438
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,342
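The encoder layer above follows a post-norm residual pattern: self-attention, residual add, LayerNorm, then a two-layer feed-forward block with another residual add and LayerNorm. A standalone sketch of that ordering, using `nn.MultiheadAttention` and ReLU as stand-ins for the custom attention class and the configured activation (dropouts omitted):

```python
import torch
from torch import nn

d_model, ffn_dim, num_heads = 32, 64, 2          # illustrative sizes
self_attn = nn.MultiheadAttention(d_model, num_heads, batch_first=True)  # stand-in attention
self_attn_layer_norm = nn.LayerNorm(d_model)
fc1, fc2 = nn.Linear(d_model, ffn_dim), nn.Linear(ffn_dim, d_model)
final_layer_norm = nn.LayerNorm(d_model)

hidden_states = torch.randn(2, 10, d_model)

# attention block: residual add first, LayerNorm after (post-norm)
residual = hidden_states
attn_out, _ = self_attn(hidden_states, hidden_states, hidden_states)
hidden_states = self_attn_layer_norm(residual + attn_out)

# feed-forward block, same post-norm ordering (ReLU stands in for the configured activation)
residual = hidden_states
hidden_states = final_layer_norm(residual + fc2(torch.relu(fc1(hidden_states))))
print(hidden_states.shape)  # torch.Size([2, 10, 32])
```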
class TimeSeriesTransformerDecoderLayer(nn.Module): def __init__(self, config: TimeSeriesTransformerConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[config._attn_implementation]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs
class_definition
22,740
28,751
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,343
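The decoder layer above assumes a per-layer cache laid out as a 4-tuple: entries 0-1 hold the self-attention key/value states, entries 2-3 the cross-attention ones. A small sketch of that slicing with illustrative shapes only:

```python
import torch

bsz, num_heads, head_dim = 2, 2, 16
past_target_len, encoder_len = 5, 12  # illustrative lengths

past_key_value = (
    torch.randn(bsz, num_heads, past_target_len, head_dim),  # 0: self-attn keys
    torch.randn(bsz, num_heads, past_target_len, head_dim),  # 1: self-attn values
    torch.randn(bsz, num_heads, encoder_len, head_dim),       # 2: cross-attn keys
    torch.randn(bsz, num_heads, encoder_len, head_dim),       # 3: cross-attn values
)

self_attn_past_key_value = past_key_value[:2]    # handed to the self-attention module
cross_attn_past_key_value = past_key_value[-2:]  # handed to the cross-attention module
print(len(self_attn_past_key_value), len(cross_attn_past_key_value))  # 2 2
```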
class TimeSeriesTransformerPreTrainedModel(PreTrainedModel):
    config_class = TimeSeriesTransformerConfig
    base_model_prefix = "model"
    main_input_name = "past_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, TimeSeriesSinusoidalPositionalEmbedding):
            pass
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
class_definition
28,754
29,532
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,344
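A minimal sketch of the initialization rule used by `_init_weights` above, applied to a single `nn.Linear`; the `init_std` value here is illustrative rather than read from a config:

```python
import torch
from torch import nn

init_std = 0.02  # illustrative; the real value comes from config.init_std
module = nn.Linear(8, 16)

module.weight.data.normal_(mean=0.0, std=init_std)  # Gaussian-initialised weights
if module.bias is not None:
    module.bias.data.zero_()                        # zeroed bias

print(round(module.weight.std().item(), 3))  # close to 0.02
```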
class TimeSeriesTransformerEncoder(TimeSeriesTransformerPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`TimeSeriesTransformerEncoderLayer`]. Args: config: TimeSeriesTransformerConfig """ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop if config.prediction_length is None: raise ValueError("The `prediction_length` config needs to be specified.") self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding( config.context_length + config.prediction_length, config.d_model ) self.layers = nn.ModuleList([TimeSeriesTransformerEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size()) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
class_definition
41,586
48,093
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,345
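The encoder above embeds its inputs by projecting the feature vector to `d_model`, adding sinusoidal positions, then applying LayerNorm and dropout. The sketch below mirrors that path with made-up sizes; the interleaved sin/cos table and the dropout probability are simplified stand-ins for `TimeSeriesSinusoidalPositionalEmbedding` and the configured dropout:

```python
import math
import torch
from torch import nn

bsz, seq_len, feature_size, d_model = 2, 24, 7, 32  # illustrative sizes
value_embedding = nn.Linear(feature_size, d_model, bias=False)
layernorm_embedding = nn.LayerNorm(d_model)

# simplified fixed sin/cos table (stand-in for TimeSeriesSinusoidalPositionalEmbedding)
position = torch.arange(seq_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
embed_pos = torch.zeros(seq_len, d_model)
embed_pos[:, 0::2] = torch.sin(position * div_term)
embed_pos[:, 1::2] = torch.cos(position * div_term)

inputs_embeds = torch.randn(bsz, seq_len, feature_size)
hidden_states = layernorm_embedding(value_embedding(inputs_embeds) + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=0.1, training=True)
print(hidden_states.shape)  # torch.Size([2, 24, 32])
```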
class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TimeSeriesTransformerDecoderLayer`] Args: config: TimeSeriesTransformerConfig """ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop if config.prediction_length is None: raise ValueError("The `prediction_length` config needs to be specified.") self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding( config.context_length + config.prediction_length, config.d_model ) self.layers = nn.ModuleList([TimeSeriesTransformerDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = inputs_embeds.size()[:-1] # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, )
class_definition
48,096
59,419
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,346
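The decoder above builds a 4D causal mask of shape `(bsz, 1, tgt_len, tgt_len + past_key_values_length)` via `_prepare_4d_causal_attention_mask`. The sketch below is a simplified stand-in showing only the causal part (no padding-mask merging), with illustrative sizes:

```python
import torch

bsz, tgt_len, past_key_values_length = 2, 4, 3  # illustrative sizes
dtype = torch.float32

# future positions get the most negative representable value, past/current positions get 0
mask = torch.triu(torch.full((tgt_len, tgt_len), torch.finfo(dtype).min), diagonal=1)
# positions already in the key/value cache stay fully visible
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length), mask], dim=-1)
causal_mask = mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
print(causal_mask.shape)  # torch.Size([2, 1, 4, 7])
```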
class TimeSeriesTransformerModel(TimeSeriesTransformerPreTrainedModel): def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) if config.scaling == "mean" or config.scaling is True: self.scaler = TimeSeriesMeanScaler(config) elif config.scaling == "std": self.scaler = TimeSeriesStdScaler(config) else: self.scaler = TimeSeriesNOPScaler(config) if config.num_static_categorical_features > 0: self.embedder = TimeSeriesFeatureEmbedder( cardinalities=config.cardinality, embedding_dims=config.embedding_dimension, ) # transformer encoder-decoder and mask initializer self.encoder = TimeSeriesTransformerEncoder(config) self.decoder = TimeSeriesTransformerDecoder(config) # Initialize weights and apply final processing self.post_init() @property def _past_length(self) -> int: return self.config.context_length + max(self.config.lags_sequence) def get_lagged_subsequences( self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0 ) -> torch.Tensor: """ Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :]. Args: sequence: Tensor The sequence from which lagged subsequences should be extracted. Shape: (N, T, C). subsequences_length : int Length of the subsequences to be extracted. shift: int Shift the lags by this amount back. """ sequence_length = sequence.shape[1] indices = [lag - shift for lag in self.config.lags_sequence] if max(indices) + subsequences_length > sequence_length: raise ValueError( f"lags cannot go further than history length, found lag {max(indices)} " f"while history length is only {sequence_length}" ) lagged_values = [] for lag_index in indices: begin_index = -lag_index - subsequences_length end_index = -lag_index if lag_index > 0 else None lagged_values.append(sequence[:, begin_index:end_index, ...]) return torch.stack(lagged_values, dim=-1) def create_network_inputs( self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, past_observed_mask: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, ): # time feature time_feat = ( torch.cat( ( past_time_features[:, self._past_length - self.config.context_length :, ...], future_time_features, ), dim=1, ) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length :, ...] 
) # target if past_observed_mask is None: past_observed_mask = torch.ones_like(past_values) context = past_values[:, -self.config.context_length :] observed_context = past_observed_mask[:, -self.config.context_length :] _, loc, scale = self.scaler(context, observed_context) inputs = ( (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale ) # static features log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p() log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log() static_feat = torch.cat((log_abs_loc, log_scale), dim=1) if static_real_features is not None: static_feat = torch.cat((static_real_features, static_feat), dim=1) if static_categorical_features is not None: embedded_cat = self.embedder(static_categorical_features) static_feat = torch.cat((embedded_cat, static_feat), dim=1) expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1) # all features features = torch.cat((expanded_static_feat, time_feat), dim=-1) # lagged features subsequences_length = ( self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length ) lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]: raise ValueError( f"input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match" ) # transformer inputs transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1) return transformer_inputs, loc, scale, static_feat def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, use_cache: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqTSModelOutput, Tuple]: r""" Returns: Examples: ```python >>> from huggingface_hub import hf_hub_download >>> import torch >>> from transformers import TimeSeriesTransformerModel >>> file = hf_hub_download( ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset" ... ) >>> batch = torch.load(file) >>> model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly") >>> # during training, one provides both past and future values >>> # as well as possible additional features >>> outputs = model( ... past_values=batch["past_values"], ... 
past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_values=batch["future_values"], ... future_time_features=batch["future_time_features"], ... ) >>> last_hidden_state = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_inputs, loc, scale, static_feat = self.create_network_inputs( past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, ) if encoder_outputs is None: enc_input = transformer_inputs[:, : self.config.context_length, ...] encoder_outputs = self.encoder( inputs_embeds=enc_input, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) dec_input = transformer_inputs[:, self.config.context_length :, ...] decoder_outputs = self.decoder( inputs_embeds=dec_input, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs + (loc, scale, static_feat) return Seq2SeqTSModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, loc=loc, scale=scale, static_features=static_feat, )
class_definition
59,601
70,803
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,347
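`get_lagged_subsequences` in the model record above gathers, for each configured lag, a slice of the (scaled) target sequence and stacks the slices along a new trailing dimension. A sketch with illustrative lags (not a library default) and the `shift=0` case:

```python
import torch

lags_sequence = [1, 2, 3]      # illustrative lags, not a library default
subsequences_length = 4
sequence = torch.arange(20.0).reshape(1, 20, 1)  # (N, T, C)

lagged_values = []
for lag in lags_sequence:      # shift=0 case
    begin_index = -lag - subsequences_length
    end_index = -lag if lag > 0 else None
    lagged_values.append(sequence[:, begin_index:end_index, ...])
lagged = torch.stack(lagged_values, dim=-1)      # (N, S, C, len(lags))

print(lagged.shape)            # torch.Size([1, 4, 1, 3])
print(lagged[0, :, 0, 0])      # the lag-1 slice: tensor([15., 16., 17., 18.])
```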
class TimeSeriesTransformerForPrediction(TimeSeriesTransformerPreTrainedModel): def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) self.model = TimeSeriesTransformerModel(config) if config.distribution_output == "student_t": self.distribution_output = StudentTOutput(dim=config.input_size) elif config.distribution_output == "normal": self.distribution_output = NormalOutput(dim=config.input_size) elif config.distribution_output == "negative_binomial": self.distribution_output = NegativeBinomialOutput(dim=config.input_size) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model) self.target_shape = self.distribution_output.event_shape if config.loss == "nll": self.loss = nll else: raise ValueError(f"Unknown loss function {config.loss}") # Initialize weights of distribution_output and apply final processing self.post_init() def output_params(self, dec_output): return self.parameter_projection(dec_output) def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() @torch.jit.ignore def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution: sliced_params = params if trailing_n is not None: sliced_params = [p[:, -trailing_n:] for p in params] return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale) @add_start_docstrings_to_model_forward(TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, future_observed_mask: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, use_cache: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqTSModelOutput, Tuple]: r""" Returns: Examples: ```python >>> from huggingface_hub import hf_hub_download >>> import torch >>> from transformers import TimeSeriesTransformerForPrediction >>> file = hf_hub_download( ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset" ... ) >>> batch = torch.load(file) >>> model = TimeSeriesTransformerForPrediction.from_pretrained( ... "huggingface/time-series-transformer-tourism-monthly" ... ) >>> # during training, one provides both past and future values >>> # as well as possible additional features >>> outputs = model( ... past_values=batch["past_values"], ... past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_values=batch["future_values"], ... future_time_features=batch["future_time_features"], ... 
) >>> loss = outputs.loss >>> loss.backward() >>> # during inference, one only provides past values >>> # as well as possible additional features >>> # the model autoregressively generates future values >>> outputs = model.generate( ... past_values=batch["past_values"], ... past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_time_features=batch["future_time_features"], ... ) >>> mean_prediction = outputs.sequences.mean(dim=1) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if future_values is not None: use_cache = False outputs = self.model( past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict, ) prediction_loss = None params = None if future_values is not None: params = self.output_params(outputs[0]) # outputs.last_hidden_state # loc is 3rd last and scale is 2nd last output distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2]) loss = self.loss(distribution, future_values) if future_observed_mask is None: future_observed_mask = torch.ones_like(future_values) if len(self.target_shape) == 0: loss_weights = future_observed_mask else: loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False) prediction_loss = weighted_average(loss, weights=loss_weights) if not return_dict: outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:] return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs return Seq2SeqTSPredictionOutput( loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features, ) @torch.no_grad() def generate( self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> SampleTSPredictionOutput: r""" Greedily generate sequences of sample predictions from a model with a probability distribution head. Parameters: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`): Past values of the time series, that serve as context in order to predict the future. 
The sequence size of this tensor must be larger than the `context_length` of the model, since the model will use the larger size to construct lag features, i.e. additional values from the past which are added in order to serve as "extra context". The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which, if no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of the past. The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags). Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`. For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of variates in the time series per time step. past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`): Required time features, which the model internally will add to `past_values`. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires you to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must be known at prediction time. The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`. future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`): Required time features for the prediction window, which the model internally will add to sampled predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These could also be so-called "age" features, which basically help the model know "at which point in life" a time-series is. Age features have small values for distant past time steps and increase monotonically the more we approach the current time step. Holiday features are also a good example of time features. These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where the position encodings are learned from scratch internally as parameters of the model, the Time Series Transformer requires you to provide additional time features. The Time Series Transformer only learns additional embeddings for `static_categorical_features`. Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features must be known at prediction time. The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*): Optional static categorical features for which the model will learn an embedding, which it will add to the values of the time series. Static categorical features are features which have the same value for all time steps (static over time). A typical example of a static categorical feature is a time series ID. static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*): Optional static real features which the model will add to the values of the time series. Static real features are features which have the same value for all time steps (static over time). A typical example of a static real feature is promotion information. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. Return: [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for multivariate predictions. """ outputs = self( static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=future_time_features, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=True, ) decoder = self.model.get_decoder() enc_last_hidden = outputs.encoder_last_hidden_state loc = outputs.loc scale = outputs.scale static_feat = outputs.static_features num_parallel_samples = self.config.num_parallel_samples repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_past_values = ( past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc ) / repeated_scale expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1) features = torch.cat((expanded_static_feat, future_time_features), dim=-1) repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0) future_samples = [] # greedy decoding for k in range(self.config.prediction_length): lagged_sequence = self.model.get_lagged_subsequences( sequence=repeated_past_values, subsequences_length=1 + k, shift=1, ) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1) dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden) dec_last_hidden = dec_output.last_hidden_state params = self.parameter_projection(dec_last_hidden[:, -1:]) distr = self.output_distribution(params, 
loc=repeated_loc, scale=repeated_scale) next_sample = distr.sample() repeated_past_values = torch.cat( (repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1 ) future_samples.append(next_sample) concat_future_samples = torch.cat(future_samples, dim=1) return SampleTSPredictionOutput( sequences=concat_future_samples.reshape( (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, ) )
class_definition
70,978
88,552
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
null
3,348
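`generate` in the record above returns `num_parallel_samples` sampled trajectories per series, and point or interval forecasts are then derived from those samples (the docstring's `outputs.sequences.mean(dim=1)`). A sketch, assuming the univariate output shape `(batch, num_parallel_samples, prediction_length)` and a random stand-in for `outputs.sequences`:

```python
import torch

batch, num_samples, prediction_length = 4, 100, 24
sequences = torch.randn(batch, num_samples, prediction_length)  # stand-in for outputs.sequences

mean_forecast = sequences.mean(dim=1)            # point forecast, (batch, prediction_length)
p10 = sequences.quantile(0.1, dim=1)             # lower band
p90 = sequences.quantile(0.9, dim=1)             # upper band
print(mean_forecast.shape, p10.shape, p90.shape)
```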
class MllamaConverter(TikTokenConverter):
    def __init__(
        self,
        vocab_file,
        special_tokens: List[str],
        pattern: str,
        model_max_length: int,
        chat_template: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(vocab_file, pattern=pattern)
        self.additional_special_tokens = special_tokens
        tokenizer = self.converted()
        if chat_template is not None:
            kwargs["chat_template"] = chat_template

        self.tokenizer = PreTrainedTokenizerFast(
            tokenizer_object=tokenizer,
            model_input_names=["input_ids", "attention_mask"],
            model_max_length=model_max_length,
            **kwargs,
        )
class_definition
24,227
24,947
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/convert_mllama_weights_to_hf.py
null
3,349
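The converter record above ends by wrapping a `tokenizers.Tokenizer` object into a `PreTrainedTokenizerFast` via `tokenizer_object=...`. The sketch below reproduces only that final wrapping step on a toy word-level tokenizer; the vocabulary and the whitespace pre-tokenizer are made up for illustration and have nothing to do with the converted TikToken model:

```python
from tokenizers import Tokenizer, models, pre_tokenizers
from transformers import PreTrainedTokenizerFast

# toy word-level tokenizer standing in for the converted TikToken model
toy_tokenizer = Tokenizer(models.WordLevel(vocab={"hello": 0, "world": 1, "[UNK]": 2}, unk_token="[UNK]"))
toy_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

fast_tokenizer = PreTrainedTokenizerFast(
    tokenizer_object=toy_tokenizer,
    model_input_names=["input_ids", "attention_mask"],
    model_max_length=128,
)
print(fast_tokenizer("hello world").input_ids)  # [0, 1]
```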
class MllamaImageProcessor(BaseImageProcessor): """ Constructs a Mllama image processor. Args: do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA. Only has an effect if the input image is in the PIL format. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image tile. Should be a dictionary containing 'height' and 'width' keys, both with integer values. The height and width values should be equal. resample (`int`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to 0.0): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `True`): Whether or not to pad the images to the largest height and width in the batch. max_image_tiles (`int`, *optional*, defaults to 4): The maximum number of tiles to split the image into. """ model_input_names = ["pixel_values", "num_tiles", "aspect_ratio_ids", "aspect_ratio_mask"] def __init__( self, do_convert_rgb: bool = True, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: bool = True, max_image_tiles: int = 4, **kwargs, ) -> None: super().__init__(**kwargs) self.do_convert_rgb = do_convert_rgb self.do_resize = do_resize self.size = size if size is not None else {"height": 224, "width": 224} self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.do_pad = do_pad self.max_image_tiles = max_image_tiles _validate_mllama_preprocess_arguments(self.do_resize, self.size, self.do_pad, self.max_image_tiles) def preprocess( self, images: ImageInput, do_convert_rgb: Optional[bool] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, max_image_tiles: Optional[int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, return_tensors: Optional[Union[str, TensorType]] = None, ): """ Preprocess a batch of images. 
Args: images (`ImageInput`): A list of images to preprocess. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image tile. Should be a dictionary containing 'height' and 'width' keys, both with integer values. The height and width values should be equal. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether or not to pad the images to the largest height and width in the batch. max_image_tiles (`int`, *optional*, defaults to `self.max_image_tiles`): The maximum number of tiles to split the image into. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. Returns: `BatchFeature` of the following structure: - **pixel_values** (`TensorType`): The preprocessed pixel values. - **aspect_ratio_ids** (`TensorType`): The aspect ratio ids of the images. - **num_tiles** (`List[List[int]]`): The number of tiles for each image in the batch. 
""" do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad max_image_tiles = max_image_tiles if max_image_tiles is not None else self.max_image_tiles validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # extra validation _validate_mllama_preprocess_arguments(do_resize, size, do_pad, max_image_tiles) images_list = make_list_of_images(images) if self.do_convert_rgb: images_list = [[convert_to_rgb(image) for image in images] for images in images_list] images_list = [[to_numpy_array(image) for image in images] for images in images_list] batch_images = [] batch_aspect_ratios = [] # iterate over batch samples for images in images_list: sample_images = [] sample_aspect_ratios = [] # iterate over images in a batch sample for image in images: # convert images to channels first format for faster processing # LAST is slower for `pad` and not supported by `split_to_tiles` data_format = ChannelDimension.FIRST image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) # do_resize=False is not supported, validated image, aspect_ratio = self.resize( image=image, size=size, resample=resample, max_image_tiles=max_image_tiles, input_data_format=data_format, data_format=data_format, ) # do_pad=False is not supported, validated image = self.pad( image=image, size=size, aspect_ratio=aspect_ratio, input_data_format=data_format, data_format=data_format, ) if do_rescale: image = self.rescale( image=image, scale=rescale_factor, input_data_format=input_data_format, data_format=data_format, ) if do_normalize: image = self.normalize( image=image, mean=image_mean, std=image_std, input_data_format=input_data_format, data_format=data_format, ) num_tiles_height, num_tiles_width = aspect_ratio image = split_to_tiles(image, num_tiles_height, num_tiles_width) sample_images.append(image) sample_aspect_ratios.append((num_tiles_height, num_tiles_width)) batch_images.append(sample_images) batch_aspect_ratios.append(sample_aspect_ratios) images, num_tiles = pack_images(batch_images, max_image_tiles) aspect_ratio_ids = convert_aspect_ratios_to_ids(batch_aspect_ratios, max_image_tiles=max_image_tiles) aspect_ratio_mask = build_aspect_ratio_mask(batch_aspect_ratios, max_image_tiles=max_image_tiles) # images (np.ndarray) with shape (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width) # aspect_ratio_ids (np.ndarray) with shape (batch_size, max_num_images) - aspect ratio ids for each image, padded to max_num_images with 0 # num_tiles (List[List[int]]) with (batch_size, num_images_in_batch) - real number of tiles for each image, not padded # aspect_ratio_mask (np.ndarray) with shape (batch_size, max_num_images, max_image_tiles) - number of tiles for each image, padded to max_num_images with 0 
encoded_inputs = BatchFeature( data={ "pixel_values": images, "aspect_ratio_ids": aspect_ratio_ids, "aspect_ratio_mask": aspect_ratio_mask, }, tensor_type=return_tensors, ) encoded_inputs["num_tiles"] = num_tiles return encoded_inputs def pad( self, image: np.ndarray, size: Dict[str, int], aspect_ratio: Tuple[int, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image to the `size` x `aspect_ratio`. For example, if size is {height: 224, width: 224} and aspect ratio is (1, 2), the image will be padded to 224x448. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. aspect_ratio (`Tuple[int, int]`): The aspect ratio of the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `np.ndarray`: The padded image. """ _validate_size(size) image_height, image_width = get_image_size(image, channel_dim=input_data_format) num_tiles_height, num_tiles_width = aspect_ratio padded_height = num_tiles_height * size["height"] padded_width = num_tiles_width * size["width"] pad_size = ((0, padded_height - image_height), (0, padded_width - image_width)) image = pad( image, pad_size, mode=PaddingMode.CONSTANT, constant_values=0, data_format=data_format, input_data_format=input_data_format, ) return image def resize( self, image: np.ndarray, size: Dict[str, int], max_image_tiles: int, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Union[np.ndarray, Tuple[int, int]]: """ Resizes an image to fit within a tiled canvas while maintaining its aspect ratio. The optimal canvas size is calculated based on the maximum number of tiles and the tile size. The function first determines the best tile arrangement for the image, then resizes the image to fit within this canvas. The resized image and the number of tiles along the height and width dimensions are returned. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. max_image_tiles (`int`): The maximum number of tiles to split the image into. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `Union[np.ndarray, Tuple[int, int]]`: The resized image and a tuple containing the number of tiles along the height and width dimensions. 
""" _validate_size(size) image_height, image_width = get_image_size(image, channel_dim=input_data_format) tile_size = size["height"] canvas_height, canvas_width = get_optimal_tiled_canvas( image_height=image_height, image_width=image_width, max_image_tiles=max_image_tiles, tile_size=tile_size, ) num_tiles_height = canvas_height // tile_size num_tiles_width = canvas_width // tile_size new_height, new_width = get_image_size_fit_to_canvas( image_height=image_height, image_width=image_width, canvas_height=canvas_height, canvas_width=canvas_width, tile_size=tile_size, ) image = resize( image, (new_height, new_width), resample=resample, data_format=data_format, input_data_format=input_data_format, ) return image, (num_tiles_height, num_tiles_width)
class_definition
21,800
39,175
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/image_processing_mllama.py
null
3,350
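A minimal usage sketch for the image processor above, assuming only that `transformers`, `Pillow`, and `numpy` are installed. The processor is constructed directly with its defaults rather than loaded from a checkpoint, the input image is synthetic, and the printed shapes are illustrative.

```python
import numpy as np
from PIL import Image
from transformers import MllamaImageProcessor

# A synthetic 500x300 RGB image stands in for real input.
image = Image.fromarray(np.random.randint(0, 255, (300, 500, 3), dtype=np.uint8))

image_processor = MllamaImageProcessor(size={"height": 224, "width": 224}, max_image_tiles=4)
batch = image_processor(images=image, return_tensors="np")

# pixel_values: (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width)
print(batch["pixel_values"].shape)   # e.g. (1, 1, 4, 3, 224, 224)
print(batch["aspect_ratio_ids"])     # id of the selected tile grid per image (0 = padding)
print(batch["num_tiles"])            # real number of tiles per image, not padded
```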
class MllamaImagesKwargs(ImagesKwargs, total=False): max_image_tiles: Optional[int]
class_definition
1,095
1,182
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/processing_mllama.py
null
3,351
class MllamaProcessorKwargs(ProcessingKwargs, total=False):
    images_kwargs: MllamaImagesKwargs

    _defaults = {
        "images_kwargs": {
            "max_image_tiles": 4,
        },
    }
class_definition
1,185
1,378
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/processing_mllama.py
null
3,352
class MllamaProcessor(ProcessorMixin): r""" Constructs a Mllama processor which wraps [`MllamaImageProcessor`] and [`PretrainedTokenizerFast`] into a single processor that inherits both the image processor and tokenizer functionalities. See the [`~MllamaProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information. The preferred way of passing kwargs is as a dictionary per modality, see usage example below. ```python from transformers import MllamaProcessor from PIL import Image processor = MllamaProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision") processor( images=your_pil_image, text=["<|image|>If I had to write a haiku for this one"], images_kwargs = {"size": {"height": 448, "width": 448}}, text_kwargs = {"padding": "right"}, common_kwargs = {"return_tensors": "pt"}, ) ``` Args: image_processor ([`MllamaImageProcessor`]): The image processor is a required input. tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "MllamaImageProcessor" tokenizer_class = "PreTrainedTokenizerFast" def __init__(self, image_processor, tokenizer): if not hasattr(tokenizer, "image_token"): self.image_token = "<|image|>" self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) else: self.image_token = tokenizer.image_token self.image_token_id = tokenizer.image_token_id self.python_token = "<|python_tag|>" self.python_token_id = tokenizer.convert_tokens_to_ids(self.python_token) self.bos_token = tokenizer.bos_token self.chat_template = tokenizer.chat_template super().__init__(image_processor, tokenizer) def __call__( self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, audio=None, videos=None, **kwargs: Unpack[MllamaProcessorKwargs], ) -> BatchFeature: """ Main method to prepare text(s) and image(s) to be fed as input to the model. This method forwards the `text` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` arguments to MllamaImageProcessor's [`~MllamaImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. 
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. TODO: add aspect_ratio_ids and aspect_ratio_mask and cross_attention_mask """ if text is None and images is None: raise ValueError("You must specify either text or images.") output_kwargs = self._merge_kwargs( MllamaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) text_kwargs = output_kwargs["text_kwargs"] images_kwargs = output_kwargs["images_kwargs"] common_kwargs = output_kwargs["common_kwargs"] data = {} if text is not None: if isinstance(text, str): text = [text] elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)): raise ValueError("Invalid input text. Please provide a string, or a list of strings") n_images_in_text = [t.count(self.image_token) for t in text] text = [build_string_from_input(text_item, self.bos_token, self.image_token) for text_item in text] _ = text_kwargs.pop("padding_side", None) # hack until padding-side is an accepted kwarg by tokenizers encoding = self.tokenizer(text, **text_kwargs) data.update(encoding) n_images_in_images = [0] if images is not None: images = make_list_of_images(images) n_images_in_images = [len(sample) for sample in images] if text is not None: if any(batch_img == 0 for batch_img in n_images_in_text) and not all( batch_img == 0 for batch_img in n_images_in_text ): raise ValueError( "If a batch of text is provided, there should be either no images or at least one image per sample" ) if sum(n_images_in_images) != sum(n_images_in_text): if images is None: raise ValueError("No image were provided, but there are image tokens in the prompt") else: raise ValueError( f"The number of image token ({sum(n_images_in_text)}) should be the same as in the number of provided images ({sum(n_images_in_images)})" ) if images is not None: image_features = self.image_processor(images, **images_kwargs) num_tiles = image_features.pop("num_tiles") data.update(image_features) # Create cross attention mask if images is not None and text is not None: cross_attention_token_mask = [ get_cross_attention_token_mask(token_ids, self.image_token_id) for token_ids in encoding["input_ids"] ] cross_attention_mask = convert_sparse_cross_attention_mask_to_dense( cross_attention_token_mask, num_tiles=num_tiles, max_num_tiles=self.image_processor.max_image_tiles, length=max(len(input_ids) for input_ids in encoding["input_ids"]), ) data["cross_attention_mask"] = cross_attention_mask return_tensors = common_kwargs.pop("return_tensors", None) batch_feature = BatchFeature(data=data, tensor_type=return_tensors) return batch_feature def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def post_process_image_text_to_text(self, generated_outputs): """ Post-process the output of the model to decode the text. 
Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` or `(sequence_length,)`. Returns: `List[str]`: The decoded text. """ return self.tokenizer.batch_decode( generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False ) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(tokenizer_input_names + image_processor_input_names + ["cross_attention_mask"])
class_definition
6,914
16,276
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/processing_mllama.py
null
3,353
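A minimal end-to-end sketch of the processor above. It assumes access to the gated checkpoint already referenced in the docstring; the image is synthetic and the printed keys and shape are illustrative.

```python
import numpy as np
from PIL import Image
from transformers import MllamaProcessor

processor = MllamaProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision")

image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(
    images=image,
    text="<|image|>If I had to write a haiku for this one",
    return_tensors="pt",
)

# One <|image|> token and one image: the counts match, so no ValueError is raised.
print(sorted(inputs.keys()))
# e.g. ['aspect_ratio_ids', 'aspect_ratio_mask', 'attention_mask', 'cross_attention_mask',
#       'input_ids', 'pixel_values']
# cross_attention_mask: (batch_size, seq_len, max_num_images, max_image_tiles)
print(inputs["cross_attention_mask"].shape)
```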
class MllamaVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MllamaVisionModel`]. It is used to instantiate an Mllama vision model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mllama-11B. e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1280): Dimensionality of the encoder layers and the pooler layer. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_global_layers (`int`, *optional*, defaults to 8): Number of global layers in the Transformer encoder. Vision model has a second transformer encoder, called global. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input image. intermediate_size (`int`, *optional*, defaults to 5120): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. vision_output_dim (`int`, *optional*, defaults to 7680): Dimensionality of the vision model output. Includes output of transformer encoder with intermediate layers and global transformer encoder. image_size (`int`, *optional*, defaults to 448): The size (resolution) of each image *tile*. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. max_num_tiles (`int`, *optional*, defaults to 4): Maximum number of tiles for image splitting. intermediate_layers_indices (`List[int]`, *optional*, defaults to [3, 7, 15, 23, 30]): Indices of intermediate layers of transformer encoder from which to extract and output features. These output features are concatenated with final hidden state of transformer encoder. supported_aspect_ratios (`List[List[int]]`, *optional*): List of supported aspect ratios for image splitting. If not specified, the default supported aspect ratios are [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]] for `max_num_tiles=4`. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import MllamaVisionConfig, MllamaVisionModel >>> # Initializing a Llama config >>> config = MllamaVisionConfig() >>> # Initializing a vision model from the mllama-11b style configuration >>> model = MllamaVisionModel(config) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mllama_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size: int = 1280, hidden_act: str = "gelu", num_hidden_layers: int = 32, num_global_layers: int = 8, num_attention_heads: int = 16, num_channels: int = 3, intermediate_size: int = 5120, vision_output_dim: int = 7680, image_size: int = 448, patch_size: int = 14, norm_eps: float = 1e-5, max_num_tiles: int = 4, intermediate_layers_indices: Optional[List[int]] = None, supported_aspect_ratios: Optional[List[List[int]]] = None, initializer_range: float = 0.02, **kwargs, ): if supported_aspect_ratios is None: if max_num_tiles != 4: raise ValueError("max_num_tiles must be 4 for default supported aspect ratios") supported_aspect_ratios = [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]] if intermediate_layers_indices is None: intermediate_layers_indices = [3, 7, 15, 23, 30] self.hidden_size = hidden_size self.hidden_act = hidden_act self.num_hidden_layers = num_hidden_layers self.num_channels = num_channels self.intermediate_size = intermediate_size self.image_size = image_size self.vision_output_dim = vision_output_dim self.patch_size = patch_size self.intermediate_layers_indices = intermediate_layers_indices self.num_global_layers = num_global_layers self.max_num_tiles = max_num_tiles self.norm_eps = norm_eps self.attention_heads = num_attention_heads self.supported_aspect_ratios = supported_aspect_ratios self.initializer_range = initializer_range super().__init__(**kwargs) @property def max_aspect_ratio_id(self) -> int: return len(self.supported_aspect_ratios)
class_definition
876
6,517
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/configuration_mllama.py
null
3,354
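A small sketch showing the default aspect-ratio table and the derived `max_aspect_ratio_id` property of the config above; the values follow directly from the defaults in `__init__`.

```python
from transformers import MllamaVisionConfig

config = MllamaVisionConfig()
print(config.supported_aspect_ratios)
# [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]]
print(config.max_aspect_ratio_id)  # 8 (aspect-ratio ids are 1-based; 0 is reserved for padded images)
```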
class MllamaTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MllamaTextModel`]. It is used to instantiate an Mllama text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mllama-11B. e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 128256): Vocabulary size of the Mllama text model. Defines the maximum number of different tokens that can be represented by the `inputs_ids` passed when calling [`MllamaTextModel`]. hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the embeddings and hidden states. hidden_act (`str` or `Callable`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the encoder and pooler. num_hidden_layers (`int`, *optional*, defaults to 40): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If not specified, will default to `num_attention_heads`. intermediate_size (`int`, *optional*, defaults to 14336): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. rope_theta (`float`, *optional*, defaults to `500000.0`): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`List[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). 
                Must be a list of numbers with the same length as the hidden size divided by the number of attention
                heads divided by 2
            `long_factor` (`List[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `low_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
            `high_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        cross_attention_layers (`List[int]`, *optional*):
            Indices of the cross attention layers. If not specified, will default to [3, 8, 13, 18, 23, 28, 33, 38].
        dropout (`float`, *optional*, defaults to 0):
            The dropout probability for self- and cross-attention layers.
        bos_token_id (`int`, *optional*, defaults to 128000):
            The id of the beginning of sentence token.
        eos_token_id (`int`, *optional*, defaults to 128001):
            The id of the end of sentence token.
        pad_token_id (`int`, *optional*, defaults to 128004):
            The id of the padding token.
Example: ```python >>> from transformers import MllamaTextModel, MllamaTextConfig >>> # Initializing a Mllama text config >>> config = MllamaTextConfig() >>> # Initializing a model from the Mllama text configuration >>> model = MllamaTextModel(config) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mllama_text_model" base_config_key = "text_config" def __init__( self, vocab_size: int = 128256, hidden_size: int = 4096, hidden_act: str = "silu", num_hidden_layers: int = 40, num_attention_heads: int = 32, num_key_value_heads: int = 8, intermediate_size: int = 14_336, rope_theta: float = 500_000, rope_scaling: Optional[Dict] = None, rms_norm_eps: float = 1e-5, max_position_embeddings: int = 131_072, initializer_range: float = 0.02, use_cache: bool = True, tie_word_embeddings: bool = False, cross_attention_layers: Optional[List[int]] = None, dropout: float = 0, bos_token_id: int = 128000, eos_token_id: int = 128001, pad_token_id: Optional[int] = 128004, **kwargs, ): if cross_attention_layers is None: cross_attention_layers = [3, 8, 13, 18, 23, 28, 33, 38] self.vocab_size = vocab_size self.num_hidden_layers = num_hidden_layers self.cross_attention_layers = cross_attention_layers self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.initializer_range = initializer_range self.use_cache = use_cache self.rope_theta = rope_theta self.rms_norm_eps = rms_norm_eps self.intermediate_size = intermediate_size self.dropout = dropout self.hidden_act = hidden_act self.rope_scaling = rope_scaling self.max_position_embeddings = max_position_embeddings rope_config_validation(self) super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
class_definition
6,520
15,208
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/configuration_mllama.py
null
3,355
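A minimal sketch of passing a `rope_scaling` dictionary of the 'llama3' type described above. The scaling values are illustrative, not taken from a released checkpoint, and are checked by `rope_config_validation` at construction time.

```python
from transformers import MllamaTextConfig

config = MllamaTextConfig(
    rope_scaling={
        "rope_type": "llama3",
        "factor": 8.0,
        "low_freq_factor": 1.0,
        "high_freq_factor": 4.0,
        "original_max_position_embeddings": 8192,
    },
)
print(config.rope_scaling["rope_type"])   # "llama3"
print(config.cross_attention_layers[:3])  # default cross-attention layer indices: [3, 8, 13]
```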
class MllamaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MllamaForConditionalGeneration`]. It is used to instantiate an
    Mllama model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Mllama-11B.

    e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaVisionConfig`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaTextConfig`):
            The config object or dictionary of the text backbone.
        image_token_index (`int`, *optional*, defaults to 128256):
            The image token index to encode the image prompt.

    Example:

    ```python
    >>> from transformers import MllamaForConditionalGeneration, MllamaConfig, MllamaVisionConfig, MllamaTextConfig

    >>> # Initializing a Mllama vision config
    >>> vision_config = MllamaVisionConfig()

    >>> # Initializing a Mllama text config
    >>> text_config = MllamaTextConfig()

    >>> # Initializing a mllama-11b style configuration
    >>> configuration = MllamaConfig(vision_config, text_config)

    >>> # Initializing a model from the mllama-11b style configuration
    >>> model = MllamaForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mllama"
    sub_configs = {"text_config": MllamaTextConfig, "vision_config": MllamaVisionConfig}

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        image_token_index=128256,
        **kwargs,
    ):
        if vision_config is None:
            self.vision_config = MllamaVisionConfig()
            logger.info("vision_config is None, using default mllama vision config")
        elif isinstance(vision_config, dict):
            self.vision_config = MllamaVisionConfig(**vision_config)
        elif isinstance(vision_config, MllamaVisionConfig):
            self.vision_config = vision_config

        self.image_token_index = image_token_index

        if text_config is None:
            self.text_config = MllamaTextConfig()
            logger.info("text_config is None, using default mllama text config")
        elif isinstance(text_config, dict):
            self.text_config = MllamaTextConfig(**text_config)
        elif isinstance(text_config, MllamaTextConfig):
            self.text_config = text_config

        super().__init__(**kwargs)
class_definition
15,211
18,116
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/configuration_mllama.py
null
3,356
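A minimal sketch of composing the top-level config above from plain dictionaries; the reduced sizes are illustrative only and do not match any released checkpoint.

```python
from transformers import MllamaConfig

config = MllamaConfig(
    vision_config={"hidden_size": 64, "num_attention_heads": 4, "intermediate_size": 128},
    text_config={"hidden_size": 64, "num_attention_heads": 4, "num_key_value_heads": 2,
                 "intermediate_size": 128},
    image_token_index=128256,
)
print(type(config.vision_config).__name__)  # MllamaVisionConfig
print(type(config.text_config).__name__)    # MllamaTextConfig
```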
class MllamaPrecomputedAspectRatioEmbedding(nn.Module): def __init__(self, config: MllamaVisionConfig, is_gated: bool = True): super().__init__() self.max_num_tiles = config.max_num_tiles self.hidden_size = config.hidden_size self.max_aspect_ratio_id = config.max_aspect_ratio_id self.is_gated = is_gated self.embedding = nn.Embedding(self.max_aspect_ratio_id + 1, self.max_num_tiles * self.hidden_size) if is_gated: self.gate = nn.Parameter(torch.zeros(1)) def forward(self, hidden_state: torch.Tensor, aspect_ratio_ids: torch.Tensor) -> torch.Tensor: embeddings = self.embedding(aspect_ratio_ids) embeddings = embeddings.reshape(-1, self.max_num_tiles, 1, self.hidden_size) if self.is_gated: embeddings = embeddings * self.gate.tanh() hidden_state = hidden_state + embeddings return hidden_state
class_definition
3,758
4,687
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,357
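A small numeric sketch of the gated tile embedding above. The module is internal (imported from `transformers.models.mllama.modeling_mllama`, a path that may change), and the tensor sizes are illustrative.

```python
import torch
from transformers import MllamaVisionConfig
from transformers.models.mllama.modeling_mllama import MllamaPrecomputedAspectRatioEmbedding

config = MllamaVisionConfig(hidden_size=32)  # max_num_tiles=4, 8 supported aspect ratios
module = MllamaPrecomputedAspectRatioEmbedding(config, is_gated=True)

hidden_state = torch.randn(2, config.max_num_tiles, 10, config.hidden_size)  # (B, tiles, patches, dim)
aspect_ratio_ids = torch.tensor([1, 6])  # one id per image

out = module(hidden_state, aspect_ratio_ids)
print(out.shape)                          # torch.Size([2, 4, 10, 32])
# The gate is zero-initialized, so the untrained module acts as an identity mapping.
print(torch.allclose(out, hidden_state))  # True
```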
class MllamaPrecomputedPositionEmbedding(nn.Module): def __init__(self, config: MllamaVisionConfig): super().__init__() self.max_num_tiles = config.max_num_tiles self.max_aspect_ratio_id = config.max_aspect_ratio_id self.num_patches = (config.image_size // config.patch_size) ** 2 + 1 self.hidden_size = config.hidden_size self.scale = config.hidden_size**-0.5 self.gate = nn.Parameter(torch.zeros(1)) # position embedding position_embedding = torch.randn(self.num_patches, self.hidden_size) self.embedding = nn.Parameter(self.scale * position_embedding) # tile position embedding self.tile_embedding = nn.Embedding( self.max_aspect_ratio_id + 1, self.max_num_tiles * self.num_patches * self.hidden_size ) def forward(self, hidden_state: torch.Tensor, aspect_ratio_ids: torch.Tensor) -> torch.Tensor: # position embeddings gated_position_embedding = (1 - self.gate.tanh()) * self.embedding hidden_state = hidden_state + gated_position_embedding.view(1, 1, self.num_patches, self.hidden_size) # precomputed tile position embeddings tile_position_embedding = self.tile_embedding(aspect_ratio_ids) batch_size = hidden_state.shape[0] tile_position_embedding = tile_position_embedding.reshape( batch_size, self.max_num_tiles, self.num_patches, self.hidden_size ) gated_tile_position_embedding = self.gate.tanh() * tile_position_embedding hidden_state = hidden_state + gated_tile_position_embedding return hidden_state
class_definition
4,690
6,332
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,358
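A small sketch of the gated position embedding above, using a deliberately tiny config so that `num_patches = (56 / 14) ** 2 + 1 = 17`; internal API, illustrative sizes.

```python
import torch
from transformers import MllamaVisionConfig
from transformers.models.mllama.modeling_mllama import MllamaPrecomputedPositionEmbedding

config = MllamaVisionConfig(hidden_size=32, image_size=56, patch_size=14)
module = MllamaPrecomputedPositionEmbedding(config)

hidden_state = torch.randn(2, config.max_num_tiles, module.num_patches, config.hidden_size)
aspect_ratio_ids = torch.tensor([2, 5])

out = module(hidden_state, aspect_ratio_ids)
print(out.shape)  # torch.Size([2, 4, 17, 32])
# With the gate at 0, the shared per-patch embedding is applied with weight 1 - tanh(0) = 1,
# while the tile-specific embedding contributes tanh(0) = 0.
```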
class MllamaVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states
class_definition
6,420
6,998
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,359
class MllamaVisionAttention(nn.Module): def __init__(self, config: MllamaVisionConfig): super().__init__() self.embed_dim = config.hidden_size self.num_heads = config.attention_heads self.head_dim = config.hidden_size // config.attention_heads self.q_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.embed_dim, bias=False) def forward( self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = None, ) -> torch.Tensor: query = self.q_proj(hidden_state) key = self.k_proj(hidden_state) value = self.v_proj(hidden_state) batch_size, q_seq_len, _ = query.shape _, kv_seq_len, _ = key.shape query = query.view(batch_size, q_seq_len, self.num_heads, self.head_dim).transpose(1, 2) key = key.view(batch_size, kv_seq_len, self.num_heads, self.head_dim).transpose(1, 2) value = value.view(batch_size, kv_seq_len, self.num_heads, self.head_dim).transpose(1, 2) attn_weights = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_seq_len, -1) output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return output, attn_weights
class_definition
7,001
9,106
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,360
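A minimal sketch running the eager vision attention above on random activations; internal API, illustrative sizes.

```python
import torch
from transformers import MllamaVisionConfig
from transformers.models.mllama.modeling_mllama import MllamaVisionAttention

config = MllamaVisionConfig(hidden_size=32, num_attention_heads=4)
attn = MllamaVisionAttention(config)

hidden_state = torch.randn(1, 6, 32)  # (batch, seq_len, hidden)
output, attn_weights = attn(hidden_state, output_attentions=True)
print(output.shape)        # torch.Size([1, 6, 32])
print(attn_weights.shape)  # torch.Size([1, 4, 6, 6]) -> (batch, heads, query_len, key_len)
```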
class MllamaVisionSdpaAttention(MllamaVisionAttention): # Adapted from MllamaVisionAttention def forward( self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = None, ) -> torch.Tensor: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. if output_attentions: logger.warning_once( "MllamaModel is using MllamaVisionSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_state=hidden_state, attention_mask=attention_mask, output_attentions=output_attentions, ) query = self.q_proj(hidden_state) key = self.k_proj(hidden_state) value = self.v_proj(hidden_state) batch_size, q_seq_len, _ = query.shape _, kv_seq_len, _ = key.shape query = query.view(batch_size, q_seq_len, self.num_heads, self.head_dim) key = key.view(batch_size, kv_seq_len, self.num_heads, self.head_dim) value = value.view(batch_size, kv_seq_len, self.num_heads, self.head_dim) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) attn_output = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_seq_len, -1) output = self.o_proj(attn_output) return output, None
class_definition
9,109
11,083
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,361
class MllamaVisionEncoderLayer(nn.Module): def __init__(self, config: MllamaVisionConfig, is_gated: bool = False): super().__init__() self.hidden_size = config.hidden_size self.num_attention_heads = config.attention_heads self.is_gated = is_gated self.intermediate_size = config.intermediate_size self.self_attn = MLLAMA_VISION_ATTENTION_CLASSES[config._attn_implementation](config) self.mlp = MllamaVisionMLP(config) self.input_layernorm = nn.LayerNorm(self.hidden_size, eps=config.norm_eps) self.post_attention_layernorm = nn.LayerNorm(self.hidden_size, eps=config.norm_eps) if is_gated: self.gate_attn = nn.Parameter(torch.ones(1) * math.pi / 4) self.gate_ffn = nn.Parameter(torch.ones(1) * math.pi / 4) def forward( self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = None, ): # Self Attention residual = hidden_state hidden_state = self.input_layernorm(hidden_state) hidden_state, attn_weights = self.self_attn(hidden_state, attention_mask=attention_mask) if self.is_gated: hidden_state = self.gate_attn.tanh() * hidden_state hidden_state = residual + hidden_state # Feed forward residual = hidden_state hidden_state = self.post_attention_layernorm(hidden_state) hidden_state = self.mlp(hidden_state) if self.is_gated: hidden_state = self.gate_ffn.tanh() * hidden_state hidden_state = residual + hidden_state outputs = (hidden_state,) if output_attentions: outputs += (attn_weights,) return outputs
class_definition
11,190
12,961
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,362
class MllamaVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`MllamaEncoderLayer`]. Args: config: MllamaConfig """ def __init__(self, config: MllamaVisionConfig, num_layers=32, is_gated=False): super().__init__() self.config = config self.layers = nn.ModuleList([MllamaVisionEncoderLayer(config, is_gated) for _ in range(num_layers)]) self.gradient_checkpointing = False self.config = config def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_state=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = layer_outputs[0] if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
class_definition
12,964
16,907
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,363
class MllamaTextRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ MllamaTextRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class_definition
17,001
17,731
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,364
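A small numeric check of the RMS normalization above: with the default all-ones weight, the output has approximately unit root-mean-square along the last dimension. Internal API.

```python
import torch
from transformers.models.mllama.modeling_mllama import MllamaTextRMSNorm

norm = MllamaTextRMSNorm(hidden_size=8, eps=1e-6)
x = torch.randn(2, 3, 8) * 5.0  # arbitrary scale

y = norm(x)
print(y.pow(2).mean(-1).sqrt())  # all entries close to 1.0
```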
class MllamaTextCrossAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: Optional[MllamaTextConfig] = None, layer_idx: Optional[int] = None, ): super().__init__() self.config = config self.num_heads = self.config.num_attention_heads self.num_key_value_heads = self.config.num_key_value_heads self.dropout = config.dropout self.hidden_size = config.hidden_size self.head_dim = config.hidden_size // self.num_heads self.layer_idx = layer_idx self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.q_norm = MllamaTextRMSNorm(self.head_dim, eps=config.rms_norm_eps) self.k_norm = MllamaTextRMSNorm(self.head_dim, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, cross_attention_states: Optional[torch.Tensor] = None, past_key_value: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, use_cache: bool = None, cache_position: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) query_states = self.q_norm(query_states) if cross_attention_states is not None: key_states = self.k_proj(cross_attention_states) value_states = self.v_proj(cross_attention_states) key_states = key_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) key_states = self.k_norm(key_states) if past_key_value is not None: # if we have a new image + new tokens, we only computed key_states on that new image # we still update the cross key states, past_image, new_image. And use it! key_states, value_states = past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) elif cache_position[0] != 0: key_states, value_states = ( past_key_value.key_cache[self.layer_idx], past_key_value.value_cache[self.layer_idx], ) else: raise ValueError( "Cross attention layer can't find neither `cross_attn_states` nor cached values for key/values!" 
) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value
class_definition
17,734
21,995
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,365
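A minimal sketch of the eager cross-attention above, with text hidden states attending to vision states passed as `cross_attention_states`; no KV cache is involved here, so the cache branches are skipped. Internal API, illustrative sizes.

```python
import torch
from transformers import MllamaTextConfig
from transformers.models.mllama.modeling_mllama import MllamaTextCrossAttention

config = MllamaTextConfig(
    hidden_size=64, num_attention_heads=4, num_key_value_heads=2, intermediate_size=128
)
cross_attn = MllamaTextCrossAttention(config, layer_idx=0)

text_states = torch.randn(1, 5, 64)     # (batch, text_seq_len, hidden)
vision_states = torch.randn(1, 12, 64)  # (batch, vision_seq_len, hidden)

attn_output, attn_weights, _ = cross_attn(
    hidden_states=text_states,
    cross_attention_states=vision_states,
    output_attentions=True,
)
print(attn_output.shape)   # torch.Size([1, 5, 64])
print(attn_weights.shape)  # torch.Size([1, 4, 5, 12])
```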
class MllamaTextCrossSdpaAttention(MllamaTextCrossAttention): """ Mllama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `MllamaTextCrossAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Adapted from MllamaTextCrossAttention.forward def forward( self, hidden_states: torch.Tensor, cross_attention_states: Optional[torch.Tensor] = None, past_key_value: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, use_cache: bool = None, cache_position: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "MllamaModel is using MllamaTextCrossSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, cross_attention_states=cross_attention_states, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) query_states = self.q_norm(query_states) if cross_attention_states is not None: key_states = self.k_proj(cross_attention_states) value_states = self.v_proj(cross_attention_states) key_states = key_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2) if past_key_value is not None: # if we have a new image + new tokens, we only computed key_states on that new image # we still update the cross key states, past_image, new_image. And use it! key_states, value_states = past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) elif cache_position[0] != 0: key_states, value_states = ( past_key_value.key_cache[self.layer_idx], past_key_value.value_cache[self.layer_idx], ) else: raise ValueError( "Cross attention layer can't find neither `cross_attn_states` nor cached values for key/values!" ) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) key_states = self.k_norm(key_states) # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and attention_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. 
is_causal = True if attention_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value
class_definition
21,998
26,857
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,366
class MllamaTextSelfAttention(nn.Module): def __init__(self, config: MllamaTextConfig, layer_idx: int): super().__init__() self.config = config self.num_heads = config.num_attention_heads self.dropout = config.dropout self.hidden_size = config.hidden_size self.num_key_value_heads = config.num_key_value_heads self.head_dim = config.hidden_size // self.num_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.rope_theta = config.rope_theta self.layer_idx = layer_idx self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor, output_attentions: bool = False, use_cache: bool = False, past_key_value=None, cache_position=None, **kwargs, ): bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value
class_definition
29,402
32,732
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,367
class MllamaTextSelfSdpaAttention(MllamaTextSelfAttention): # Adapted from MllamaTextSelfAttention def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor, output_attentions: bool = False, use_cache: bool = False, past_key_value=None, cache_position=None, **kwargs, ): if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "MllamaModel is using MllamaTextSelfSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.dropout if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value
class_definition
32,735
36,778
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,368
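The SDPA variant computes the same attention as the eager parent class but routes it through `torch.nn.functional.scaled_dot_product_attention` (PyTorch 2.x), which can dispatch to flash or memory-efficient kernels. A small sketch, on random tensors, of why the two paths agree when no mask or dropout is involved:

```python
import math

import torch
import torch.nn.functional as F

q = torch.randn(1, 8, 5, 16)
k = torch.randn(1, 8, 5, 16)
v = torch.randn(1, 8, 5, 16)

# eager path: explicit score matrix and softmax, as in MllamaTextSelfAttention
scores = q @ k.transpose(2, 3) / math.sqrt(q.shape[-1])
eager_out = torch.softmax(scores, dim=-1) @ v

# fused path: same math behind a single kernel call
sdpa_out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)

print(torch.allclose(eager_out, sdpa_out, atol=1e-5))  # True, up to numerical tolerance
```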
class MllamaTextMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) # Ignore copy self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj
class_definition
37,090
37,785
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,369
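`MllamaTextMLP` is the usual gated (SwiGLU-style) feed-forward block: `down_proj(act(gate_proj(x)) * up_proj(x))`. A tiny sketch with made-up sizes, assuming `config.hidden_act` resolves to SiLU as it typically does for Llama-family text configs:

```python
import torch
import torch.nn as nn

hidden, intermediate = 32, 64
gate_proj = nn.Linear(hidden, intermediate, bias=False)
up_proj = nn.Linear(hidden, intermediate, bias=False)
down_proj = nn.Linear(intermediate, hidden, bias=False)

x = torch.randn(2, 5, hidden)
y = down_proj(nn.functional.silu(gate_proj(x)) * up_proj(x))  # same wiring as MllamaTextMLP.forward
print(y.shape)  # torch.Size([2, 5, 32])
```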
class MllamaSelfAttentionDecoderLayer(nn.Module): def __init__(self, config: MllamaTextConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MLLAMA_TEXT_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = MllamaTextMLP(config) self.input_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.layer_idx = layer_idx def forward( self, hidden_states: torch.Tensor, cross_attention_states: Optional[torch.Tensor] = None, cross_attention_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, full_text_row_masked_out_mask: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs
class_definition
37,863
41,876
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,370
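The decoder layer above is a standard pre-norm residual block: normalize, run the sub-module, then add the result back onto the untouched input. A sketch of that wiring with stand-in modules (`nn.LayerNorm` in place of `MllamaTextRMSNorm`, `nn.MultiheadAttention` in place of the Mllama attention classes); only the residual structure is the point here:

```python
import torch
import torch.nn as nn

dim = 32
input_layernorm = nn.LayerNorm(dim)            # stand-in for MllamaTextRMSNorm
post_attention_layernorm = nn.LayerNorm(dim)
self_attn = nn.MultiheadAttention(dim, num_heads=4, batch_first=True)  # placeholder attention
mlp = nn.Sequential(nn.Linear(dim, 4 * dim), nn.SiLU(), nn.Linear(4 * dim, dim))

x = torch.randn(2, 5, dim)

h = input_layernorm(x)
h, _ = self_attn(h, h, h)
x = x + h                                      # first residual: attention block

x = x + mlp(post_attention_layernorm(x))       # second residual: feed-forward block
print(x.shape)
```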
class MllamaCrossAttentionDecoderLayer(torch.nn.Module): """Cross-attention transformer block with tanh-gated attention and feedforward.""" def __init__(self, config: MllamaTextConfig, layer_idx: int) -> None: super().__init__() self.layer_idx = layer_idx self.cross_attn = MLLAMA_TEXT_CROSS_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) self.input_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.cross_attn_attn_gate = torch.nn.Parameter(torch.zeros(1)) self.mlp = MllamaTextMLP(config) self.post_attention_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.cross_attn_mlp_gate = torch.nn.Parameter(torch.zeros(1)) def forward( self, hidden_states: torch.Tensor, cross_attention_states: torch.Tensor, cross_attention_mask: torch.Tensor, attention_mask: torch.Tensor, full_text_row_masked_out_mask: Tuple[torch.Tensor, torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, attn_weights, past_key_value = self.cross_attn( hidden_states=hidden_states, attention_mask=cross_attention_mask, cross_attention_states=cross_attention_states, past_key_value=past_key_value, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = residual + self.cross_attn_attn_gate.tanh() * hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) if full_text_row_masked_out_mask is not None: hidden_states = full_text_row_masked_out_mask[:, 0] * hidden_states # type: ignore hidden_states = residual + self.cross_attn_mlp_gate.tanh() * hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) if use_cache: outputs += (past_key_value,) return outputs
class_definition
41,879
44,400
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,371
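The distinguishing detail of the cross-attention layer is the pair of tanh-gated residuals: `cross_attn_attn_gate` and `cross_attn_mlp_gate` are zero-initialized, so `tanh(0) == 0` and the whole block is an identity mapping at initialization. A short sketch of that behavior:

```python
import torch

gate = torch.nn.Parameter(torch.zeros(1))      # same init as cross_attn_attn_gate / cross_attn_mlp_gate
residual = torch.randn(2, 5, 32)
block_out = torch.randn(2, 5, 32)              # pretend output of cross-attention or the MLP

out = residual + gate.tanh() * block_out
print(torch.equal(out, residual))              # True: the gated branch contributes nothing at init
```

This is the usual trick for grafting new cross-attention blocks onto a pretrained language model: the gates let the vision signal be blended in gradually during training rather than disrupting the text model from the first step.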
class MllamaRotaryEmbedding(nn.Module):
    def __init__(self, config: MllamaTextConfig, device=None):
        super().__init__()
        self.rope_type = config.rope_scaling["rope_type"]
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class_definition
44,403
47,176
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,372
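A compact, standalone sketch of what the rotary embedding computes and how the resulting `cos`/`sin` tables are consumed by an `apply_rotary_pos_emb`-style rotate-half helper. The `attention_scaling` factor and the dynamic-frequency update are omitted; `head_dim` and `base` are illustrative values:

```python
import torch

head_dim, base = 16, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))  # (head_dim / 2,)

position_ids = torch.arange(6)[None, :].float()              # (batch=1, seq=6)
freqs = position_ids[:, :, None] * inv_freq[None, None, :]   # (1, 6, head_dim / 2)
emb = torch.cat((freqs, freqs), dim=-1)                      # (1, 6, head_dim)
cos, sin = emb.cos(), emb.sin()

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

q = torch.randn(1, 4, 6, head_dim)                           # (batch, heads, seq, head_dim)
q_rot = q * cos[:, None, :, :] + rotate_half(q) * sin[:, None, :, :]
print(q_rot.shape)
```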
class MllamaPreTrainedModel(PreTrainedModel): config_class = MllamaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = [ "MllamaVisionEncoderLayer", "MllamaCrossAttentionDecoderLayer", "MllamaSelfAttentionDecoderLayer", ] _supports_cache_class = True _supports_static_cache = False # static cache cannot have different shapes for each layer _supports_sdpa = True _supports_quantized_cache = True def _init_weights(self, module): std = self.config.get_text_config().initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.Parameter): module.data.normal_(mean=0.0, std=std) elif isinstance(module, MllamaVisionModel): nn.init.normal_(module.class_embedding.data, std=std) elif isinstance(module, MllamaPrecomputedPositionEmbedding): nn.init.normal_(module.embedding.data, std=std) elif isinstance(module, MllamaVisionEncoderLayer) and module.is_gated: nn.init.normal_(module.gate_attn.data, std=std) nn.init.normal_(module.gate_ffn.data, std=std) # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type == "cuda" and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
class_definition
47,179
54,697
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,373
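`_prepare_4d_causal_attention_mask_with_cache_position` turns a 2D padding mask into the additive 4D mask the attention modules expect. A simplified, standalone sketch of the common case (2D mask, no static-cache offset), where allowed positions get 0 and blocked positions get the dtype's most negative value:

```python
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
attention_mask = torch.tensor([[1, 1, 1, 0]])                 # (batch, key_len); 0 marks padding
seq_len = attention_mask.shape[-1]

causal = torch.full((seq_len, seq_len), min_dtype, dtype=dtype)
causal = torch.triu(causal, diagonal=1)                       # future positions blocked
causal = causal[None, None, :, :].expand(attention_mask.shape[0], 1, -1, -1).clone()

# merge in the padding mask: a position is also blocked if it is padded, even when it is in the past
padding = causal[:, :, :, :seq_len] + attention_mask[:, None, None, :].to(dtype)
causal[:, :, :, :seq_len] = causal[:, :, :, :seq_len].masked_fill(padding == 0, min_dtype)

print(causal[0, 0])  # 0.0 where attention is allowed, min_dtype everywhere else
```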
class MllamaVisionModel(MllamaPreTrainedModel): config_class = MllamaVisionConfig base_model_prefix = "vision_model" def __init__(self, config: MllamaVisionConfig): super().__init__(config) self.image_size = config.image_size self.patch_size = config.patch_size self.max_num_tiles = config.max_num_tiles self.hidden_size = config.hidden_size self.num_channels = config.num_channels self.intermediate_layers_indices = config.intermediate_layers_indices self.num_patches = (self.image_size // self.patch_size) ** 2 + 1 self.scale = config.hidden_size**-0.5 self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.hidden_size, kernel_size=self.patch_size, stride=self.patch_size, padding="valid", bias=False, ) self.class_embedding = nn.Parameter(self.scale * torch.randn(self.hidden_size)) self.gated_positional_embedding = MllamaPrecomputedPositionEmbedding(config) self.pre_tile_positional_embedding = MllamaPrecomputedAspectRatioEmbedding(config, is_gated=True) self.post_tile_positional_embedding = MllamaPrecomputedAspectRatioEmbedding(config, is_gated=True) # layer norms self.layernorm_pre = nn.LayerNorm(self.hidden_size) self.layernorm_post = nn.LayerNorm(self.hidden_size) # encoders self.transformer = MllamaVisionEncoder(config, config.num_hidden_layers, is_gated=False) self.global_transformer = MllamaVisionEncoder(config, config.num_global_layers, is_gated=True) self.post_init() def get_input_embeddings(self): """ This function is used to fetch the first embedding layer to activate grads on inputs. """ return self.patch_embedding def apply_class_embedding(self, hidden_state: torch.Tensor) -> torch.Tensor: batch_size, _, hidden_size = hidden_state.shape class_embedding = self.class_embedding.expand(batch_size, 1, hidden_size) hidden_state = torch.cat([class_embedding, hidden_state], dim=1) return hidden_state @add_start_docstrings_to_model_forward(MLLAMA_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class="MllamaVisionConfig") def forward( self, pixel_values: torch.Tensor, aspect_ratio_ids: torch.Tensor, aspect_ratio_mask: torch.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: r""" Returns: Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MllamaVisionModel >>> checkpoint = "meta-llama/Llama-3.2-11B-Vision" >>> model = MllamaVisionModel.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> output = model(**inputs) >>> print(output.last_hidden_state.shape) torch.Size([1, 1, 4, 1025, 7680]) ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_concurrent_media, num_tiles, num_channels, height, width = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * num_concurrent_media * num_tiles, num_channels, height, width) aspect_ratio_ids = aspect_ratio_ids.reshape(batch_size * num_concurrent_media, -1) # Patch embedding patch_embeds = 
self.patch_embedding(pixel_values.to(self.dtype).to(self.device)) hidden_state = patch_embeds.flatten(2).transpose(1, 2) # Tile embeddings _, num_patches, dim = hidden_state.shape hidden_state = hidden_state.reshape(batch_size * num_concurrent_media, num_tiles, -1, dim) hidden_state = self.pre_tile_positional_embedding(hidden_state, aspect_ratio_ids) # Add cls token hidden_state = hidden_state.reshape(batch_size * num_concurrent_media * num_tiles, num_patches, dim) hidden_state = self.apply_class_embedding(hidden_state) num_patches += 1 # Position embeddings hidden_state = hidden_state.reshape(batch_size * num_concurrent_media, num_tiles, num_patches, dim) hidden_state = self.gated_positional_embedding(hidden_state, aspect_ratio_ids) hidden_state = self.layernorm_pre(hidden_state) # Compute the number of tokens to pad num_padding_patches = (8 - (hidden_state.shape[-2] % 8)) % 8 # Compute padding tuple for pad function padding = (0, 0, 0, num_padding_patches) # (pad_left, pad_right, pad_left for dim -2, pad_right for dim -2) # Pad the tensor hidden_state = F.pad(hidden_state, padding, mode="constant", value=0) slice_index = -num_padding_patches if num_padding_patches > 0 else None # Prepare attention mask attention_mask = aspect_ratio_mask.reshape(batch_size * num_concurrent_media, -1) attention_mask = _prepare_aspect_ratio_attention_mask( aspect_ratio_mask=attention_mask, num_patches=self.num_patches, target_length=hidden_state.shape[2], dtype=self.dtype, ) # Apply encoder hidden_state = hidden_state.view(batch_size * num_concurrent_media, -1, dim) output = self.transformer( hidden_state, attention_mask=attention_mask, output_hidden_states=True, output_attentions=output_attentions, ) hidden_state = output[0] hidden_state = self.layernorm_post(hidden_state) # Apply global encoder hidden_state = hidden_state.reshape( batch_size * num_concurrent_media, num_tiles, num_patches + num_padding_patches, dim ) hidden_state = self.post_tile_positional_embedding(hidden_state, aspect_ratio_ids) hidden_state = hidden_state.reshape( batch_size * num_concurrent_media, num_tiles * (num_patches + num_padding_patches), dim ) global_output = self.global_transformer( hidden_state, attention_mask=attention_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, ) hidden_state = global_output[0] # Remove padding form hidden state hidden_state = hidden_state.reshape( batch_size * num_concurrent_media, num_tiles, num_patches + num_padding_patches, dim ) hidden_state = hidden_state[:, :, :slice_index] hidden_state = hidden_state.reshape(batch_size, num_concurrent_media, num_tiles, num_patches, dim) # Collect intermediate layer outputs from encoder output all_intermediate_hidden_states = [output[1][i] for i in self.intermediate_layers_indices] intermediate_hidden_states = torch.stack(all_intermediate_hidden_states, dim=-1) # Remove padding from intermediate hidden states intermediate_hidden_states = intermediate_hidden_states.reshape( batch_size * num_concurrent_media, num_tiles, num_patches + num_padding_patches, -1 ) intermediate_hidden_states = intermediate_hidden_states[:, :, :slice_index] intermediate_hidden_states = intermediate_hidden_states.reshape( batch_size, num_concurrent_media, num_tiles, num_patches, -1 ) # Concatenate final hidden state and intermediate hidden states hidden_state = torch.cat([hidden_state, intermediate_hidden_states], dim=-1) if output_hidden_states: hidden_states = tuple(all_intermediate_hidden_states) + tuple(global_output[1]) else: hidden_states = None 
if output_attentions: # global transformer in contrast to `self.transformer` doesn't always return hidden states so we might go index out-of-range global_attn = tuple(global_output[2]) if output_hidden_states else tuple(global_output[1]) attentions = tuple(output[2]) + global_attn else: attentions = None if not return_dict: return tuple(v for v in [hidden_state, hidden_states, attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_state, hidden_states=hidden_states, attentions=attentions, )
class_definition
70,201
79,315
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,374
class MllamaTextModel(MllamaPreTrainedModel): config_class = MllamaTextConfig base_model_prefix = "language_model.model" def __init__(self, config: MllamaTextConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size + 8, config.hidden_size, self.padding_idx) self.cross_attention_layers = config.cross_attention_layers layers = [] for layer_idx in range(config.num_hidden_layers): if layer_idx in self.cross_attention_layers: layers.append(MllamaCrossAttentionDecoderLayer(config, layer_idx)) else: layers.append(MllamaSelfAttentionDecoderLayer(config, layer_idx)) self.layers = nn.ModuleList(layers) self.norm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = MllamaRotaryEmbedding(config=config) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(MLLAMA_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class="MllamaTextConfig") def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, cross_attention_states: Optional[torch.FloatTensor] = None, cross_attention_mask: Optional[torch.Tensor] = None, full_text_row_masked_out_mask: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: """ Returns: Example: ```python >>> from transformers import AutoProcessor, MllamaTextModel >>> checkpoint = "meta-llama/Llama-3.2-11B-Vision" >>> model = MllamaTextModel.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) >>> text = "<|image|>If I had to write a haiku for this one" >>> inputs = processor(text=text, return_tensors="pt") >>> output = model(**inputs) >>> print(output.last_hidden_state.shape) torch.Size([1, 13, 4096]) ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) # For text-only path we should skip cross attention layers. # Let's check if the layer is cross attention layer and if we have cross attention states # or cached cross attention states. is_cross_attention_layer = idx in self.cross_attention_layers is_cross_attention_cache_empty = past_key_values is None or ( past_key_values is not None and past_key_values.get_seq_length(idx) == 0 ) if is_cross_attention_layer and cross_attention_states is None and is_cross_attention_cache_empty: continue if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, cross_attention_states, cross_attention_mask, causal_mask, full_text_row_masked_out_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, cross_attention_states=cross_attention_states, cross_attention_mask=cross_attention_mask, attention_mask=causal_mask, full_text_row_masked_out_mask=full_text_row_masked_out_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, )
class_definition
79,472
87,185
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,375
class MllamaForCausalLM(MllamaPreTrainedModel, GenerationMixin): config_class = MllamaTextConfig _supports_static_cache = True # only the LLM without cross attn can do compile base_model_prefix = "language_model" _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config.get_text_config()) self.text_config = config.get_text_config() self.vocab_size = self.text_config.vocab_size self.model = MllamaTextModel._from_config(self.text_config) self.lm_head = nn.Linear(self.text_config.hidden_size, self.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(MLLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class="MllamaTextConfig") def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, cross_attention_states: Optional[torch.LongTensor] = None, cross_attention_mask: Optional[torch.LongTensor] = None, full_text_row_masked_out_mask: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, num_logits_to_keep: int = 0, **loss_kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. num_logits_to_keep (`int`, *optional*): Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. Returns: Example: ```python >>> from transformers import AutoTokenizer, MllamaForCausalLM >>> model = MllamaForCausalLM.from_pretrained("Llama-3.2-11B-Vision") >>> tokenizer = AutoTokenizer.from_pretrained("Llama-3.2-11B-Vision") >>> prompt = "If I had to write a haiku, it would be:" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=40, do_sample=True, temperature=0.6) >>> result = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] >>> print(result) If I had to write a haiku, it would be: "Snowflakes gently fall" - simple, yet peaceful. 
I love the idea of snowflakes gently falling, each one ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, cross_attention_states=cross_attention_states, attention_mask=attention_mask, position_ids=position_ids, cross_attention_mask=cross_attention_mask, full_text_row_masked_out_mask=full_text_row_masked_out_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]).float() loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
87,312
92,995
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,376
class MllamaForConditionalGeneration(MllamaPreTrainedModel, GenerationMixin): _supports_quantized_cache = False # quant cache not supported in encoder-decoder setting def __init__(self, config: MllamaConfig): super().__init__(config) self.vocab_size = config.text_config.vocab_size self.hidden_size = config.text_config.hidden_size self.max_num_tiles = config.vision_config.max_num_tiles self.vision_output_dim = config.vision_config.vision_output_dim self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 self.vision_model = MllamaVisionModel._from_config(config.vision_config) self.language_model = MllamaForCausalLM._from_config(config.text_config) if self.language_model._tied_weights_keys is not None: self._tied_weights_keys = [f"language_model.{k}" for k in self.language_model._tied_weights_keys] self.multi_modal_projector = nn.Linear( config.vision_config.vision_output_dim, config.text_config.hidden_size, bias=True, ) self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.set_decoder(decoder) def get_decoder(self): return self.language_model.get_decoder() @add_start_docstrings_to_model_forward(MLLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class="MllamaConfig") def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, aspect_ratio_mask: Optional[torch.Tensor] = None, aspect_ratio_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_mask: Optional[torch.Tensor] = None, cross_attention_states: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, num_logits_to_keep: int = 0, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. num_logits_to_keep (`int`, *optional*): Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. 
Returns: Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MllamaForConditionalGeneration >>> checkpoint = "meta-llama/Llama-3.2-11B-Vision" >>> model = MllamaForConditionalGeneration.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) >>> prompt = "<|image|>If I had to write a haiku for this one" >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=prompt, images=image, return_tensors="pt") >>> # Generate >>> output = model.generate(**inputs, max_new_tokens=15) >>> prompt_len = inputs.input_ids.shape[-1] >>> generated_ids = output[:, prompt_len:] >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) >>> print(generated_text) [', it would be:.\\nA stop sign in Chinatown.\\n'] ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if pixel_values is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one" ) if pixel_values is not None and cross_attention_states is not None: raise ValueError("`pixel_values` and `cross_attention_states` cannot be provided simultaneously") if pixel_values is not None: if aspect_ratio_ids is None: raise ValueError("`aspect_ratio_ids` must be provided if `pixel_values` is provided") # get vision tokens from vision model vision_outputs = self.vision_model( pixel_values=pixel_values, aspect_ratio_ids=aspect_ratio_ids, aspect_ratio_mask=aspect_ratio_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) cross_attention_states = vision_outputs[0] cross_attention_states = self.multi_modal_projector(cross_attention_states).reshape( -1, cross_attention_states.shape[-2], self.hidden_size ) if cross_attention_mask is not None: cross_attention_mask, full_text_row_masked_out_mask = _prepare_cross_attention_mask( cross_attention_mask, num_vision_tokens=self.vision_model.num_patches, dtype=self.dtype, ) else: full_text_row_masked_out_mask = None if cross_attention_mask is not None and cache_position is not None: cross_attention_mask = cross_attention_mask[:, :, cache_position] full_text_row_masked_out_mask = full_text_row_masked_out_mask[:, :, cache_position] outputs = self.language_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, cross_attention_states=cross_attention_states, cross_attention_mask=cross_attention_mask, full_text_row_masked_out_mask=full_text_row_masked_out_mask, past_key_values=past_key_values, use_cache=use_cache, inputs_embeds=inputs_embeds, labels=labels, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, cache_position=cache_position, num_logits_to_keep=num_logits_to_keep, ) return outputs def prepare_inputs_for_generation( self, input_ids=None, inputs_embeds=None, attention_mask=None, position_ids=None, pixel_values=None, aspect_ratio_ids=None, 
aspect_ratio_mask=None, cross_attention_mask=None, past_key_values=None, use_cache=False, cache_position=None, num_logits_to_keep=None, **kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens # Exception 1: when passing input_embeds, input_ids may be missing entries # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here if past_key_values is not None: if inputs_embeds is not None: # Exception 1 input_ids = input_ids[:, -cache_position.shape[0] :] elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2) input_ids = input_ids[:, cache_position] # TODO: we have no attention_mask so this won't work, check if we really won't need attention mask and find another way if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s `mode="reduce-overhead`, as otherwise the input `position_ids` would have various stride during the decoding. Here, simply using `.contiguous()` is not sufficient as in the batch size = 1 case, `position_ids` is already contiguous but with varying stride which retriggers a capture. position_ids = position_ids.clone(memory_format=torch.contiguous_format) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None} else: # The clone here is for the same reason as for `position_ids`. model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None} if num_logits_to_keep is not None: model_inputs["num_logits_to_keep"] = num_logits_to_keep model_inputs.update( { "position_ids": position_ids, "cache_position": cache_position, "past_key_values": past_key_values, "use_cache": use_cache, "attention_mask": attention_mask, "cross_attention_mask": cross_attention_mask, } ) # If we're in pre-fill or cacheless decoding step, then we need pixel_values and aspect ratios # to compute image hidden states, otherwise they are cached within each cross attn layer if cache_position[0] == 0: model_inputs["pixel_values"] = pixel_values model_inputs["aspect_ratio_ids"] = aspect_ratio_ids model_inputs["aspect_ratio_mask"] = aspect_ratio_mask return model_inputs def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs): cross_attention_mask_prev = model_kwargs.get("cross_attention_mask", None) model_kwargs = super()._update_model_kwargs_for_generation( outputs=outputs, model_kwargs=model_kwargs, is_encoder_decoder=is_encoder_decoder, **kwargs, ) # add cross-attn mask for new token if cross_attention_mask_prev is not None: model_kwargs["cross_attention_mask"] = torch.cat( [cross_attention_mask_prev, cross_attention_mask_prev[:, -1:, ...]], dim=1 ) return model_kwargs
class_definition
93,136
105,369
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mllama/modeling_mllama.py
null
3,377
class OmDetTurboTextKwargs(TextKwargs, total=False): task: Optional[Union[str, List[str], TextInput, PreTokenizedInput]]
class_definition
1,264
1,388
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/processing_omdet_turbo.py
null
3,378
class OmDetTurboProcessorKwargs(ProcessingKwargs, total=False): text_kwargs: OmDetTurboTextKwargs _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": "max_length", "truncation": True, "max_length": 77, "stride": 0, "return_overflowing_tokens": False, "return_special_tokens_mask": False, "return_offsets_mapping": False, "return_token_type_ids": False, "return_length": False, "verbose": True, "task": None, }, "images_kwargs": {}, }
class_definition
1,518
2,149
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/processing_omdet_turbo.py
null
3,379
class DictWithDeprecationWarning(dict):
    message = (
        "The `classes` key is deprecated in the `OmDetTurboProcessor.post_process_grounded_object_detection` "
        "output dict and will be removed in version 4.51.0. Please use `text_labels` instead."
    )

    def __getitem__(self, key):
        if key == "classes":
            warnings.warn(self.message, FutureWarning)
            return super().__getitem__("text_labels")
        return super().__getitem__(key)

    def get(self, key, *args, **kwargs):
        if key == "classes":
            warnings.warn(self.message, FutureWarning)
            return super().get("text_labels", *args, **kwargs)
        return super().get(key, *args, **kwargs)
class_definition
2,152
2,867
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/processing_omdet_turbo.py
null
3,380
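`DictWithDeprecationWarning` keeps the old `"classes"` key working as a read-only alias for `"text_labels"` while emitting a `FutureWarning`. A small sketch of that behavior, assuming the class defined above is in scope (it is an internal helper of the processing module, not a public export):

```python
import warnings

# uses the DictWithDeprecationWarning class defined above
result = DictWithDeprecationWarning(
    {"boxes": [], "scores": [], "labels": [], "text_labels": ["cat"]}
)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = result["classes"]                 # deprecated key, transparently mapped to "text_labels"

print(legacy)                                  # ['cat']
print(caught[0].category is FutureWarning)     # True
```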
class OmDetTurboProcessor(ProcessorMixin):
    r"""
    Constructs an OmDet-Turbo processor which wraps a Deformable DETR image processor and an AutoTokenizer into a
    single processor.

    [`OmDetTurboProcessor`] offers all the functionalities of [`DetrImageProcessor`] and
    [`AutoTokenizer`]. See the docstring of [`~OmDetTurboProcessor.__call__`] and [`~OmDetTurboProcessor.decode`]
    for more information.

    Args:
        image_processor (`DetrImageProcessor`):
            An instance of [`DetrImageProcessor`]. The image processor is a required input.
        tokenizer (`AutoTokenizer`):
            An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "DetrImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[List[str], List[List[str]]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[OmDetTurboProcessorKwargs],
    ) -> BatchFeature:
        """
        This method uses [`DetrImageProcessor.__call__`] to prepare image(s) for the model, and
        [`CLIPTokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255.
            text (`Union[str, List[str], List[List[str]]]`):
                The classes used to limit the scope of the open vocabulary detection. Expects a list of strings or a
                list of lists of strings. Batched classes can be of different lengths.
                Examples: ["cat", "dog", "bird"], [["cat", "dog", "bird"], ["hat", "person"], ["car"]]
        Kwargs:
            task (`Union[str, List[str], TextInput, PreTokenizedInput]`):
                The grounded text used to guide open vocabulary detection. Expects a single string or a list of
                strings. Examples: "Detect a cat, a dog, and a bird.", ["Detect everything.", "Detect trees and flowers."]
                When not provided, the default task is "Detect [class1], [class2], [class3]" etc.
            ...
""" if images is None or text is None: raise ValueError("You have to specify both `images` and `text`") output_kwargs = self._merge_kwargs( OmDetTurboProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if isinstance(text, str): text = text.strip(" ").split(",") if not (len(text) and isinstance(text[0], (list, tuple))): text = [text] task = output_kwargs["text_kwargs"].pop("task", None) if task is None: task = ["Detect {}.".format(", ".join(text_single)) for text_single in text] elif not isinstance(task, (list, tuple)): task = [task] encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"]) tasks_encoding = self.tokenizer(text=task, **output_kwargs["text_kwargs"]) classes = text classes_structure = torch.tensor([len(class_single) for class_single in classes], dtype=torch.long) classes_flattened = [class_single for class_batch in classes for class_single in class_batch] classes_encoding = self.tokenizer(text=classes_flattened, **output_kwargs["text_kwargs"]) encoding = BatchFeature() encoding.update({f"tasks_{key}": value for key, value in tasks_encoding.items()}) encoding.update({f"classes_{key}": value for key, value in classes_encoding.items()}) encoding.update({"classes_structure": classes_structure}) encoding.update(encoding_image_processor) return encoding # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def _get_default_image_size(self) -> Tuple[int, int]: height = ( self.image_processor.size["height"] if "height" in self.image_processor.size else self.image_processor.size["shortest_edge"] ) width = ( self.image_processor.size["width"] if "width" in self.image_processor.size else self.image_processor.size["longest_edge"] ) return height, width @deprecate_kwarg("score_threshold", new_name="threshold", version="4.51.0") @deprecate_kwarg("classes", new_name="text_labels", version="4.51.0") def post_process_grounded_object_detection( self, outputs: "OmDetTurboObjectDetectionOutput", text_labels: Optional[Union[List[str], List[List[str]]]] = None, threshold: float = 0.3, nms_threshold: float = 0.5, target_sizes: Optional[Union[TensorType, List[Tuple]]] = None, max_num_det: Optional[int] = None, ): """ Converts the raw output of [`OmDetTurboForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format and get the associated text class. Args: outputs ([`OmDetTurboObjectDetectionOutput`]): Raw outputs of the model. text_labels (Union[List[str], List[List[str]]], *optional*): The input classes names. If not provided, `text_labels` will be set to `None` in `outputs`. threshold (float, defaults to 0.3): Only return detections with a confidence score exceeding this threshold. nms_threshold (float, defaults to 0.5): The threshold to use for box non-maximum suppression. 
Value in [0, 1]. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. max_num_det (`int`, *optional*): The maximum number of detections to return. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, classes and boxes for an image in the batch as predicted by the model. """ batch_size = len(outputs.decoder_coord_logits) # Inputs consistency check for target sizes if target_sizes is None: height, width = self._get_default_image_size() target_sizes = [(height, width)] * batch_size if any(len(image_size) != 2 for image_size in target_sizes): raise ValueError( "Each element of target_sizes must contain the size (height, width) of each image of the batch" ) if len(target_sizes) != batch_size: raise ValueError("Make sure that you pass in as many target sizes as output sequences") # Inputs consistency check for text labels if text_labels is not None and isinstance(text_labels[0], str): text_labels = [text_labels] if text_labels is not None and len(text_labels) != batch_size: raise ValueError("Make sure that you pass in as many classes group as output sequences") # Convert target_sizes to list for easier handling if isinstance(target_sizes, torch.Tensor): target_sizes = target_sizes.tolist() batch_boxes = outputs.decoder_coord_logits batch_logits = outputs.decoder_class_logits batch_num_classes = outputs.classes_structure batch_scores, batch_labels = compute_score(batch_logits) results = [] for boxes, scores, image_size, image_num_classes in zip( batch_boxes, batch_scores, target_sizes, batch_num_classes ): boxes, scores, labels = _post_process_boxes_for_image( boxes=boxes, scores=scores, labels=batch_labels, image_num_classes=image_num_classes, image_size=image_size, threshold=threshold, nms_threshold=nms_threshold, max_num_det=max_num_det, ) result = DictWithDeprecationWarning( {"boxes": boxes, "scores": scores, "labels": labels, "text_labels": None} ) results.append(result) # Add text labels if text_labels is not None: for result, image_text_labels in zip(results, text_labels): result["text_labels"] = [image_text_labels[idx] for idx in result["labels"]] return results
class_definition
7,727
17,374
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/processing_omdet_turbo.py
null
3,381
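A hedged end-to-end sketch of how the processor and `post_process_grounded_object_detection` fit together for zero-shot detection. The checkpoint name, image URL, and thresholds are illustrative placeholders; substitute whichever OmDet-Turbo checkpoint and inputs you actually use:

```python
import requests
import torch
from PIL import Image

from transformers import AutoProcessor, OmDetTurboForObjectDetection

checkpoint = "omlab/omdet-turbo-swin-tiny-hf"   # illustrative checkpoint name
processor = AutoProcessor.from_pretrained(checkpoint)
model = OmDetTurboForObjectDetection.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
text_labels = [["cat", "remote"]]               # one class list per image

inputs = processor(images=image, text=text_labels, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

results = processor.post_process_grounded_object_detection(
    outputs,
    text_labels=text_labels,
    threshold=0.3,
    target_sizes=[(image.height, image.width)],
)
print(results[0]["scores"], results[0]["text_labels"])
```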
class OmDetTurboEncoderOutput(ModelOutput): """ Base class for outputs of the OmDetTurboHybridEncoder. Args: last_hidden_state (`torch.FloatTensor`): Last hidden states of the encoder. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. extracted_states (`Tuple[torch.FloatTensor]`): The extracted states from the Feature Pyramid Network (FPN) and Path Aggregation Network (PAN) of the encoder. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None extracted_states: Tuple[torch.FloatTensor] = None
class_definition
1,705
3,257
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,382
class OmDetTurboDecoderOutput(ModelOutput): """ Base class for outputs of the OmDetTurboDecoder. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder. decoder_coords (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The predicted coordinates of the objects. decoder_classes (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes)`): The predicted classes of the objects. encoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The predicted coordinates of the objects from the encoder. encoder_class_logits (`Tuple[torch.FloatTensor]`) of shape `(batch_size, num_queries, num_classes)`: The predicted class of the objects from the encoder. init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): The initial reference points. intermediate_reference_points (`Tuple[Tuple[torch.FloatTensor]]`): The intermediate reference points. hidden_states (`Optional[Tuple[torch.FloatTensor]]`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`Optional[Tuple[Tuple[torch.FloatTensor]]]`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention, cross-attention and multi-scale deformable attention heads. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_coords: torch.FloatTensor = None decoder_classes: torch.FloatTensor = None encoder_coord_logits: torch.FloatTensor = None encoder_class_logits: Tuple[torch.FloatTensor] = None init_reference_points: torch.FloatTensor = None intermediate_reference_points: Tuple[Tuple[torch.FloatTensor]] = None
class_definition
3,271
5,936
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,383
class OmDetTurboObjectDetectionOutput(ModelOutput):
    """
    Output type of [`OmDetTurboForObjectDetection`].

    Args:
        loss (`torch.FloatTensor`):
            The loss value.
        decoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
            The predicted coordinate logits of the objects.
        decoder_class_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes)`):
            The predicted classes of the objects.
        init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
            The initial reference points.
        intermediate_reference_points (`Tuple[Tuple[torch.FloatTensor]]`):
            The intermediate reference points.
        encoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
            The predicted coordinates of the objects from the encoder.
        encoder_class_logits (`Tuple[torch.FloatTensor]`):
            The predicted classes of the objects from the encoder.
        encoder_extracted_states (`torch.FloatTensor`):
            The extracted states from the Feature Pyramid Network (FPN) and Path Aggregation Network (PAN) of the
            encoder.
        decoder_hidden_states (`Tuple[torch.FloatTensor]`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        decoder_attentions (`Tuple[Tuple[torch.FloatTensor]]`, *optional*):
            Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
            sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the
            weighted average in the self-attention, cross-attention and multi-scale deformable attention heads.
        encoder_hidden_states (`Tuple[torch.FloatTensor]`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        encoder_attentions (`Tuple[Tuple[torch.FloatTensor]]`, *optional*):
            Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
            sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the
            weighted average in the self-attention, cross-attention and multi-scale deformable attention heads.
        classes_structure (`torch.LongTensor`, *optional*):
            The number of queried classes for each image.
    """

    loss: torch.FloatTensor = None
    decoder_coord_logits: torch.FloatTensor = None
    decoder_class_logits: torch.FloatTensor = None
    init_reference_points: torch.FloatTensor = None
    intermediate_reference_points: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    encoder_coord_logits: torch.FloatTensor = None
    encoder_class_logits: Tuple[torch.FloatTensor] = None
    encoder_extracted_states: torch.FloatTensor = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    classes_structure: Optional[torch.LongTensor] = None
class_definition
5,950
9,633
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,384
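A hedged sketch of how the decoder outputs above could be turned into boxes, scores and labels for a single image. It assumes the usual DETR-family conventions (normalized `(cx, cy, w, h)` boxes and sigmoid scoring of the class logits); the processor's own post-processing method is the authoritative path, and `sketch_post_process` is a hypothetical helper.

# Hypothetical post-processing sketch for one image. Assumes decoder_coord_logits
# hold normalized (cx, cy, w, h) boxes in [0, 1] and that class confidences come
# from a sigmoid over decoder_class_logits; the library's processor implements
# the real post-processing.
import torch


def sketch_post_process(coord_logits, class_logits, image_height, image_width, score_threshold=0.3):
    # coord_logits: (num_queries, 4), class_logits: (num_queries, num_classes)
    scores = class_logits.sigmoid()
    best_scores, best_labels = scores.max(dim=-1)

    center_x, center_y, box_w, box_h = coord_logits.unbind(-1)
    boxes = torch.stack(
        [center_x - 0.5 * box_w, center_y - 0.5 * box_h, center_x + 0.5 * box_w, center_y + 0.5 * box_h],
        dim=-1,
    )
    # scale normalized corners to absolute pixel coordinates
    boxes = boxes * torch.tensor([image_width, image_height, image_width, image_height], dtype=boxes.dtype)

    keep = best_scores > score_threshold
    return boxes[keep], best_scores[keep], best_labels[keep]


boxes, scores, labels = sketch_post_process(
    torch.rand(900, 4), torch.randn(900, 3), image_height=480, image_width=640
)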
class OmDetTurboLRUCache:
    def __init__(self, capacity: int):
        self.cache = OrderedDict()
        self.capacity = capacity
        self.current_load = 0

    def has(self, key) -> bool:
        return key in self.cache

    def get(self, key):
        """
        Get the value of the key if the key exists in the cache, otherwise return None.
        Move the key to the end of the cache to show that it was recently used.
        """
        if key not in self.cache:
            return None
        self.cache.move_to_end(key)
        return self.cache[key]

    def put(self, key, value) -> None:
        """
        Add the key-value pair to the cache.
        Move the key to the end of the cache to show that it was recently used.
        If the cache is full, remove the first key (least recently used).
        """
        if key not in self.cache:
            self.current_load += 1
            if self.current_load > self.capacity:
                self.cache.popitem(last=False)
                self.current_load -= 1

        self.cache[key] = value
        self.cache.move_to_end(key)
class_definition
12,780
13,887
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,385
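A short usage sketch for the LRU cache above, assuming `OmDetTurboLRUCache` (defined in the record above) is in scope: with capacity 2, inserting a third key evicts the least recently used entry.

# Usage sketch: capacity 2, so the third insertion evicts the least recently used key.
cache = OmDetTurboLRUCache(capacity=2)
cache.put("first", 1)
cache.put("second", 2)
cache.get("first")          # marks "first" as recently used
cache.put("third", 3)       # evicts "second" (least recently used)

print(cache.has("first"))   # True
print(cache.has("second"))  # False, it was evicted
print(cache.get("third"))   # 3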
class OmDetTurboLanguageBackbone(nn.Module):
    def __init__(self, config: OmDetTurboConfig):
        super().__init__()
        self.model = AutoModel.from_config(config.text_config)
        self.text_projection = nn.Parameter(torch.zeros(config.text_projection_in_dim, config.text_projection_out_dim))

    def forward(self, hidden_states, mask=None, encode_type="task"):
        text_outputs = self.model(hidden_states)
        # text_outputs[0] is the sequence of last hidden states, of shape (batch_size, sequence_length, hidden_size)
        pooled_output = text_outputs[0]
        if encode_type == "task":
            if mask is None:
                raise ValueError("mask is required for task encoding")
            max_len = (mask != 0).sum(1).max().item()
            truncated_mask = mask[:, :max_len]
            truncated_output = pooled_output[:, :max_len, :]
            return truncated_output.transpose(0, 1), truncated_mask
        elif encode_type == "class":
            max_pooled_output = pooled_output[torch.arange(pooled_output.shape[0]), hidden_states.argmax(dim=-1)]
            projected_output = max_pooled_output @ self.text_projection
            return projected_output
        else:
            raise ValueError(f"encode_type {encode_type} is not supported")
class_definition
13,890
15,066
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,386
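The "class" branch above pools, for every sequence, the hidden state at the position of the largest input id (CLIP-style end-of-sequence pooling) and projects it with a learned matrix. A standalone sketch with toy tensors and clearer variable names (shapes and values are illustrative only):

# Standalone sketch of the "class" encoding path: pick the hidden state at the
# position of the largest input id per sequence, then project it.
import torch

batch_size, seq_len, hidden_dim, proj_dim = 2, 6, 8, 4
input_ids = torch.randint(0, 100, (batch_size, seq_len))
hidden_states = torch.randn(batch_size, seq_len, hidden_dim)    # stands in for text_outputs[0]
text_projection = torch.randn(hidden_dim, proj_dim)             # stands in for self.text_projection

eos_positions = input_ids.argmax(dim=-1)                         # (batch_size,)
pooled = hidden_states[torch.arange(batch_size), eos_positions]  # (batch_size, hidden_dim)
projected = pooled @ text_projection                             # (batch_size, proj_dim)
print(projected.shape)  # torch.Size([2, 4])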
class OmDetTurboVisionBackbone(nn.Module):
    def __init__(self, config: OmDetTurboConfig):
        super().__init__()
        self.apply_layernorm_after_vision_backbone = config.apply_layernorm_after_vision_backbone
        self.vision_backbone = load_backbone(config)
        self.layer_norms = nn.ModuleList(
            [nn.LayerNorm(in_channel_dim, eps=config.layer_norm_eps) for in_channel_dim in config.encoder_in_channels]
        )

    def forward(self, pixel_values):
        outputs = self.vision_backbone(pixel_values).feature_maps
        if self.apply_layernorm_after_vision_backbone:
            outputs = [
                layer_norm(output).permute(0, 3, 1, 2).contiguous()
                for layer_norm, output in zip(self.layer_norms, outputs)
            ]

        return outputs
class_definition
15,069
15,872
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,387
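The `permute(0, 3, 1, 2)` in the forward above implies the backbone returns channels-last feature maps: LayerNorm normalizes over the trailing channel dimension, then the map is moved to the channels-first layout the encoder expects. A minimal sketch with toy shapes (the shapes are assumptions, not values from a real backbone):

# Minimal sketch of the normalization step: LayerNorm over the channel dim of a
# channels-last feature map, then permute to channels-first.
import torch
from torch import nn

batch, height, width, channels = 2, 7, 7, 96
feature_map = torch.randn(batch, height, width, channels)    # channels-last backbone output

layer_norm = nn.LayerNorm(channels, eps=1e-5)
normalized = layer_norm(feature_map)                          # normalize over channels
channels_first = normalized.permute(0, 3, 1, 2).contiguous()  # (batch, channels, height, width)
print(channels_first.shape)  # torch.Size([2, 96, 7, 7])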
class MultiScaleDeformableAttentionFunction(Function):
    @staticmethod
    def forward(
        context,
        value,
        value_spatial_shapes,
        value_level_start_index,
        sampling_locations,
        attention_weights,
        im2col_step,
    ):
        context.im2col_step = im2col_step
        output = MultiScaleDeformableAttention.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            context.im2col_step,
        )
        context.save_for_backward(
            value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        (
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        ) = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = MultiScaleDeformableAttention.ms_deform_attn_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            grad_output,
            context.im2col_step,
        )

        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
class_definition
15,988
17,432
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,388
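The class above follows the generic `torch.autograd.Function` pattern: stash the forward inputs with `save_for_backward`, read them back from `saved_tensors` in `backward`, and return one gradient (or `None`) per forward input. A sketch of that pattern with a toy scaling op, since the real forward and backward call into a compiled CUDA kernel that cannot be reproduced here:

# Generic autograd.Function pattern shown on a toy op (ScaleByWeight is illustrative).
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable


class ScaleByWeight(Function):
    @staticmethod
    def forward(context, value, weight):
        context.save_for_backward(value, weight)   # stash tensors for backward
        return value * weight

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        value, weight = context.saved_tensors
        # one gradient per forward input
        return grad_output * weight, grad_output * value


value = torch.randn(3, requires_grad=True)
weight = torch.full((3,), 2.0)
ScaleByWeight.apply(value, weight).sum().backward()
print(value.grad)  # tensor([2., 2., 2.])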
class OmDetTurboMultiscaleDeformableAttention(nn.Module):
    """
    Multiscale deformable attention as proposed in Deformable DETR.
    """

    def __init__(self, config: OmDetTurboConfig, num_heads: int, n_points: int):
        super().__init__()

        kernel_loaded = MultiScaleDeformableAttention is not None
        if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded:
            try:
                load_cuda_kernels()
            except Exception as e:
                logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}")

        if config.d_model % num_heads != 0:
            raise ValueError(
                f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}"
            )
        dim_per_head = config.d_model // num_heads
        # check if dim_per_head is power of 2
        if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
            warnings.warn(
                "You'd better set embed_dim (d_model) in OmDetTurboMultiscaleDeformableAttention to make the"
                " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
                " implementation."
            )

        self.im2col_step = 64

        self.d_model = config.d_model
        self.n_levels = config.num_feature_levels
        self.n_heads = num_heads
        self.n_points = n_points

        self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
        self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
        self.value_proj = nn.Linear(config.d_model, config.d_model)
        self.output_proj = nn.Linear(config.d_model, config.d_model)

        self.disable_custom_kernels = config.disable_custom_kernels

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        return tensor if position_embeddings is None else tensor + position_embeddings

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        position_embeddings: Optional[torch.Tensor] = None,
        reference_points=None,
        spatial_shapes=None,
        spatial_shapes_list=None,
        level_start_index=None,
        output_attentions: bool = False,
    ):
        # add position embeddings to the hidden states before projecting to queries and keys
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)

        batch_size, num_queries, _ = hidden_states.shape
        batch_size, sequence_length, _ = encoder_hidden_states.shape
        # Ignore copy
        total_elements = sum([shape[0] * shape[1] for shape in spatial_shapes_list])
        if total_elements != sequence_length:
            raise ValueError(
                "Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
            )

        value = self.value_proj(encoder_hidden_states)
        if attention_mask is not None:
            # we invert the attention_mask
            value = value.masked_fill(~attention_mask[..., None], float(0))
        value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(hidden_states).view(
            batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
        )
        attention_weights = self.attention_weights(hidden_states).view(
            batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
        )
        attention_weights = F.softmax(attention_weights, -1).view(
            batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
        )
        # batch_size, num_queries, n_heads, n_levels, n_points, 2
        num_coordinates = reference_points.shape[-1]
        if num_coordinates == 2:
            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            sampling_locations = (
                reference_points[:, :, None, :, None, :]
                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
            )
        elif num_coordinates == 4:
            sampling_locations = (
                reference_points[:, :, None, :, None, :2]
                + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
            )
        else:
            raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")

        if self.disable_custom_kernels:
            # PyTorch implementation
            output = multi_scale_deformable_attention(
                value, spatial_shapes_list, sampling_locations, attention_weights
            )
        else:
            try:
                # custom kernel
                output = MultiScaleDeformableAttentionFunction.apply(
                    value,
                    spatial_shapes,
                    level_start_index,
                    sampling_locations,
                    attention_weights,
                    self.im2col_step,
                )
            except Exception:
                # PyTorch implementation
                output = multi_scale_deformable_attention(
                    value, spatial_shapes_list, sampling_locations, attention_weights
                )
        output = self.output_proj(output)

        return output, attention_weights
class_definition
17,616
23,273
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,389
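A shape-level sketch of the 2-coordinate sampling-location computation above, with toy sizes: reference points are normalized to [0, 1] per level, and the learned offsets are divided by each level's spatial shape so they stay in normalized units before being added.

# Shape-level sketch of the sampling-location computation (toy sizes only).
import torch

batch, queries, heads, levels, points = 2, 5, 8, 3, 4
reference_points = torch.rand(batch, queries, levels, 2)                  # normalized (x, y)
sampling_offsets = torch.randn(batch, queries, heads, levels, points, 2)  # offsets in feature-map units
spatial_shapes = torch.tensor([[32, 32], [16, 16], [8, 8]])               # (height, width) per level

offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)  # (width, height)
sampling_locations = (
    reference_points[:, :, None, :, None, :]
    + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
print(sampling_locations.shape)  # torch.Size([2, 5, 8, 3, 4, 2])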
class OmDetTurboConvNormLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, kernel_size, stride, padding=None, activation=None):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding=(kernel_size - 1) // 2 if padding is None else padding,
            bias=False,
        )
        self.norm = nn.BatchNorm2d(out_channels, config.batch_norm_eps)
        self.activation = nn.Identity() if activation is None else ACT2CLS[activation]()

    def forward(self, hidden_state):
        hidden_state = self.conv(hidden_state)
        hidden_state = self.norm(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class_definition
23,379
24,169
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,390
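The default padding rule above, `(kernel_size - 1) // 2`, keeps the spatial size unchanged for stride-1 convolutions with odd kernels. A quick check with plain PyTorch layers:

# Quick check of the "same" padding rule for stride-1, odd-kernel convolutions.
import torch
from torch import nn

x = torch.randn(1, 16, 20, 20)
conv3 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=(3 - 1) // 2, bias=False)
conv5 = nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=(5 - 1) // 2, bias=False)
print(conv3(x).shape, conv5(x).shape)  # both torch.Size([1, 32, 20, 20])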
class OmDetTurboRepVggBlock(nn.Module):
    """
    RepVGG architecture block introduced by the work "RepVGG: Making VGG-style ConvNets Great Again".
    """

    def __init__(self, config: OmDetTurboConfig):
        super().__init__()
        activation = config.csp_activation
        hidden_channels = int(config.encoder_hidden_dim * config.hidden_expansion)
        self.conv1 = OmDetTurboConvNormLayer(config, hidden_channels, hidden_channels, 3, 1, padding=1)
        self.conv2 = OmDetTurboConvNormLayer(config, hidden_channels, hidden_channels, 1, 1, padding=0)
        self.activation = nn.Identity() if activation is None else ACT2CLS[activation]()

    def forward(self, x):
        y = self.conv1(x) + self.conv2(x)
        return self.activation(y)
class_definition
24,310
25,072
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,391
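The block above sums a 3x3 branch and a 1x1 branch applied to the same input; because both preserve the spatial size, the two paths can later be re-parameterized into a single convolution, which is the RepVGG idea. A bare-bones sketch of the two-branch sum with plain PyTorch layers (channel and spatial sizes are illustrative):

# Two-branch RepVGG-style sum: a 3x3 and a 1x1 convolution on the same input.
import torch
from torch import nn

channels = 32
conv3x3 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False)
conv1x1 = nn.Conv2d(channels, channels, kernel_size=1, stride=1, padding=0, bias=False)

x = torch.randn(1, channels, 14, 14)
y = conv3x3(x) + conv1x1(x)   # both branches keep the spatial size, so the sum is well defined
print(y.shape)                # torch.Size([1, 32, 14, 14])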
class OmDetTurboCSPRepLayer(nn.Module):
    """
    Cross Stage Partial (CSP) network layer with RepVGG blocks.
    """

    def __init__(self, config: OmDetTurboConfig):
        super().__init__()
        in_channels = config.encoder_hidden_dim * 2
        out_channels = config.encoder_hidden_dim
        num_blocks = 3
        activation = config.csp_activation

        hidden_channels = int(out_channels * config.hidden_expansion)
        self.conv1 = OmDetTurboConvNormLayer(config, in_channels, hidden_channels, 1, 1, activation=activation)
        self.conv2 = OmDetTurboConvNormLayer(config, in_channels, hidden_channels, 1, 1, activation=activation)
        self.bottlenecks = nn.Sequential(*[OmDetTurboRepVggBlock(config) for _ in range(num_blocks)])
        if hidden_channels != out_channels:
            self.conv3 = OmDetTurboConvNormLayer(config, hidden_channels, out_channels, 1, 1, activation=activation)
        else:
            self.conv3 = nn.Identity()

    def forward(self, hidden_state):
        device = hidden_state.device
        hidden_state_1 = self.conv1(hidden_state)
        hidden_state_1 = self.bottlenecks(hidden_state_1).to(device)
        hidden_state_2 = self.conv2(hidden_state).to(device)
        return self.conv3(hidden_state_1 + hidden_state_2)
class_definition
25,213
26,503
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,392
class OmDetTurboMultiheadAttention(nn.Module):
    """Equivalent implementation of nn.MultiheadAttention with `batch_first=True`."""

    def __init__(self, config, hidden_size, num_attention_heads, dropout):
        super().__init__()
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({hidden_size}) is not a multiple of the number of attention "
                f"heads ({num_attention_heads})"
            )

        self.num_attention_heads = num_attention_heads
        self.attention_head_size = int(hidden_size / num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.out_proj = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(dropout)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        queries: torch.Tensor,
        keys: torch.Tensor,
        values: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        query_layer = self.transpose_for_scores(self.query(queries))
        key_layer = self.transpose_for_scores(self.key(keys))
        value_layer = self.transpose_for_scores(self.value(values))

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        context_layer = self.out_proj(context_layer)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
class_definition
26,506
29,352
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,393
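"Equivalent to nn.MultiheadAttention with batch_first=True" means queries, keys and values enter as `(batch, seq_len, hidden_size)` and the context comes back in the same layout. The sketch below uses torch's own module, not the class above, purely to illustrate the interface it mirrors; sizes are illustrative.

# Interface sketch using torch.nn.MultiheadAttention with batch_first=True.
import torch
from torch import nn

hidden_size, num_heads = 64, 8
attention = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)

queries = torch.randn(2, 10, hidden_size)
keys = values = torch.randn(2, 10, hidden_size)
context, weights = attention(queries, keys, values)
print(context.shape, weights.shape)  # torch.Size([2, 10, 64]) torch.Size([2, 10, 10])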
class OmDetTurboEncoderLayer(nn.Module):
    def __init__(self, config: OmDetTurboConfig):
        super().__init__()
        self.self_attn = OmDetTurboMultiheadAttention(
            config,
            hidden_size=config.encoder_hidden_dim,
            num_attention_heads=config.num_attention_heads,
            dropout=config.encoder_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.encoder_dropout)
        self.activation_fn = ACT2FN[config.encoder_feedforward_activation]
        self.encoder_feedforward_dropout = nn.Dropout(config.encoder_feedforward_dropout)
        self.fc1 = nn.Linear(config.encoder_hidden_dim, config.encoder_dim_feedforward)
        self.fc2 = nn.Linear(config.encoder_dim_feedforward, config.encoder_hidden_dim)
        self.final_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps)

    @staticmethod
    def with_pos_embed(tensor, pos_embed):
        return tensor if pos_embed is None else tensor + pos_embed

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_embeddings: torch.Tensor = None,
        output_attentions: bool = False,
    ):
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`):
                attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by
                very large negative values.
            position_embeddings (`torch.FloatTensor`, *optional*):
                Position embeddings, added to the queries and keys in the self-attention layer.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        query = key = self.with_pos_embed(hidden_states, position_embeddings)
        hidden_states = self.self_attn(
            queries=query,
            keys=key,
            values=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states, attentions = hidden_states if output_attentions else (hidden_states[0], None)

        hidden_states = self.dropout(hidden_states)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.encoder_feedforward_dropout(hidden_states)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        if self.training:
            if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
                clamp_value = torch.finfo(hidden_states.dtype).max - 1000
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        if output_attentions:
            return hidden_states, attentions

        return (hidden_states,)
class_definition
29,355
32,778
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,394
class OmDetTurboEncoder(nn.Module):
    def __init__(self, config: OmDetTurboConfig):
        super().__init__()

        self.layers = nn.ModuleList([OmDetTurboEncoderLayer(config) for _ in range(config.encoder_layers)])

    def forward(
        self, src, src_mask=None, pos_embed=None, output_attentions: bool = False
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
        hidden_states = src
        attention = () if output_attentions else None
        for layer in self.layers:
            hidden_states = layer(
                hidden_states,
                attention_mask=src_mask,
                position_embeddings=pos_embed,
                output_attentions=output_attentions,
            )
            if output_attentions:
                attention = attention + (hidden_states[1],)
            hidden_states = hidden_states[0]

        return hidden_states, attention
class_definition
32,781
33,677
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,395
class OmDetTurboHybridEncoder(nn.Module):
    """
    Encoder consisting of channel projection layers, a set of `OmDetTurboEncoder`, a top-down Feature Pyramid Network
    (FPN) and a bottom-up Path Aggregation Network (PAN). More details in the paper: https://arxiv.org/abs/2304.08069

    Args:
        config: OmDetTurboConfig
    """

    def __init__(self, config: OmDetTurboConfig):
        super().__init__()
        self.config = config
        self.in_channels = config.encoder_in_channels
        self.encoder_hidden_dim = config.encoder_hidden_dim
        self.encoder_projection_indices = config.encoder_projection_indices
        self.positional_encoding_temperature = config.positional_encoding_temperature
        self.eval_size = config.eval_size
        self.out_channels = [self.encoder_hidden_dim for _ in self.in_channels]

        self.channel_projection_layers = nn.ModuleList()
        for in_channel in self.in_channels:
            self.channel_projection_layers.append(
                nn.Sequential(
                    nn.Conv2d(in_channel, self.encoder_hidden_dim, kernel_size=(1, 1), bias=False),
                    nn.BatchNorm2d(self.encoder_hidden_dim),
                )
            )

        # encoder transformer
        self.encoder = nn.ModuleList([OmDetTurboEncoder(config) for _ in range(len(self.encoder_projection_indices))])
        # top-down fpn
        self.lateral_convs = nn.ModuleList()
        self.fpn_blocks = nn.ModuleList()
        for _ in range(len(self.in_channels) - 1, 0, -1):
            self.lateral_convs.append(
                OmDetTurboConvNormLayer(
                    config,
                    in_channels=self.encoder_hidden_dim,
                    out_channels=self.encoder_hidden_dim,
                    kernel_size=1,
                    stride=1,
                    activation=config.conv_norm_activation,
                )
            )
            self.fpn_blocks.append(OmDetTurboCSPRepLayer(config))

        # bottom-up pan
        self.downsample_convs = nn.ModuleList()
        self.pan_blocks = nn.ModuleList()
        for _ in range(len(self.in_channels) - 1):
            self.downsample_convs.append(
                OmDetTurboConvNormLayer(
                    config,
                    in_channels=self.encoder_hidden_dim,
                    out_channels=self.encoder_hidden_dim,
                    kernel_size=3,
                    stride=2,
                    activation=config.conv_norm_activation,
                )
            )
            self.pan_blocks.append(OmDetTurboCSPRepLayer(config))

    @staticmethod
    def build_2d_sincos_position_embedding(
        width, height, embed_dim=256, temperature=10000.0, device="cpu", dtype=torch.float32
    ):
        grid_w = torch.arange(int(width), dtype=dtype, device=device)
        grid_h = torch.arange(int(height), dtype=dtype, device=device)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij")
        if embed_dim % 4 != 0:
            raise ValueError("Embed dimension must be divisible by 4 for 2D sin-cos position embedding")
        pos_dim = embed_dim // 4
        omega = torch.arange(pos_dim, dtype=dtype, device=device) / pos_dim
        omega = 1.0 / (temperature**omega)

        out_w = grid_w.flatten()[..., None] @ omega[None]
        out_h = grid_h.flatten()[..., None] @ omega[None]

        return torch.concat([out_w.sin(), out_w.cos(), out_h.sin(), out_h.cos()], dim=1)[None, :, :]

    def forward(
        self,
        inputs_embeddings=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            inputs_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Flattened feature map (output of the backbone + projection layers) that is passed to the encoder.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        hidden_states = inputs_embeddings

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # get projection features
        projected_features = [self.channel_projection_layers[i](feature) for i, feature in enumerate(hidden_states)]
        # encoder
        for encoder_layer_index, feature_to_project_index in enumerate(self.encoder_projection_indices):
            if output_hidden_states:
                encoder_states = encoder_states + (projected_features[feature_to_project_index],)
            height, width = projected_features[feature_to_project_index].shape[2:]
            # flatten [batch, channel, height, width] to [batch, height*width, channel]
            src_flatten = projected_features[feature_to_project_index].flatten(2).permute(0, 2, 1)
            if self.training or self.eval_size is None:
                pos_embed = self.build_2d_sincos_position_embedding(
                    width,
                    height,
                    self.encoder_hidden_dim,
                    self.positional_encoding_temperature,
                    device=src_flatten.device,
                    dtype=src_flatten.dtype,
                ).to(src_flatten.device, src_flatten.dtype)
            else:
                pos_embed = None

            layer_outputs = self.encoder[encoder_layer_index](
                src_flatten,
                pos_embed=pos_embed,
                output_attentions=output_attentions,
            )
            projected_features[feature_to_project_index] = (
                layer_outputs[0].permute(0, 2, 1).reshape(-1, self.encoder_hidden_dim, height, width).contiguous()
            )

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (projected_features[feature_to_project_index],)

        # Feature Pyramid Network (FPN)
        fpn_feature_maps = [projected_features[-1]]
        for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = fpn_feature_maps[0]
            feat_low = projected_features[idx - 1]
            feat_high = self.lateral_convs[len(self.in_channels) - 1 - idx](feat_high)
            fpn_feature_maps[0] = feat_high
            upsample_feat = F.interpolate(feat_high, scale_factor=2.0, mode="nearest")
            fps_map = self.fpn_blocks[len(self.in_channels) - 1 - idx](torch.concat([upsample_feat, feat_low], dim=1))
            fpn_feature_maps.insert(0, fps_map)

        # Path Aggregation Network (PAN)
        fpn_states = [fpn_feature_maps[0]]
        for idx in range(len(self.in_channels) - 1):
            feat_low = fpn_states[-1]
            feat_high = fpn_feature_maps[idx + 1]
            downsample_feat = self.downsample_convs[idx](feat_low)
            hidden_states = self.pan_blocks[idx](
                torch.concat([downsample_feat, feat_high.to(downsample_feat.device)], dim=1)
            )
            fpn_states.append(hidden_states)

        if not return_dict:
            return (fpn_states[-1], encoder_states, all_attentions, fpn_states)
        return OmDetTurboEncoderOutput(
            last_hidden_state=fpn_states[-1],
            hidden_states=encoder_states,
            attentions=all_attentions,
            extracted_states=fpn_states,
        )
class_definition
33,680
41,907
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,396
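The `build_2d_sincos_position_embedding` above encodes the x and y grid coordinates with sine and cosine at a range of frequencies, one quarter of the embedding per sin/cos of each axis, and returns a `(1, width*height, embed_dim)` tensor that is added to the flattened feature map. A standalone sketch with toy sizes (it mirrors the math above rather than calling the class):

# Standalone sketch of the 2D sine-cosine position embedding (toy sizes).
import torch

width, height, embed_dim, temperature = 4, 3, 16, 10000.0
grid_w = torch.arange(width, dtype=torch.float32)
grid_h = torch.arange(height, dtype=torch.float32)
grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij")

pos_dim = embed_dim // 4                                   # embed_dim must be divisible by 4
omega = 1.0 / (temperature ** (torch.arange(pos_dim, dtype=torch.float32) / pos_dim))
out_w = grid_w.flatten()[..., None] @ omega[None]          # (width*height, pos_dim)
out_h = grid_h.flatten()[..., None] @ omega[None]

pos_embed = torch.concat([out_w.sin(), out_w.cos(), out_h.sin(), out_h.cos()], dim=1)[None]
print(pos_embed.shape)  # torch.Size([1, 12, 16])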
class OmDetTurboMLPWithDropout(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.linear1 = nn.Linear(config.class_embed_dim, config.task_encoder_hidden_dim)
        self.activation = ACT2FN[config.decoder_activation]
        self.dropout = nn.Dropout(config.decoder_dropout)
        self.linear2 = nn.Linear(config.task_encoder_hidden_dim, config.class_embed_dim)

    def forward(self, x):
        return self.linear2(self.dropout(self.activation(self.linear1(x))))
class_definition
41,910
42,410
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,397
class OmDetTurboMLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN)"""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        hidden_layers_dims = [hidden_dim] * (num_layers - 1)
        layers_dims = [input_dim] + hidden_layers_dims + [output_dim]
        self.layers = nn.ModuleList(
            [nn.Linear(in_dim, out_dim) for in_dim, out_dim in zip(layers_dims[:-1], layers_dims[1:])]
        )

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
class_definition
42,413
43,090
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,398
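A usage sketch for the MLP above, assuming the class (and the module-level imports it relies on) is in scope: ReLU is applied between hidden layers but not on the final layer, so a 3-layer head maps the last dimension from `input_dim` to `output_dim`. The dimensions below are illustrative.

# Usage sketch: a 3-layer MLP head mapping 256-dim queries to 4 box values.
import torch

mlp = OmDetTurboMLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
queries = torch.randn(2, 10, 256)
boxes = mlp(queries)
print(boxes.shape)  # torch.Size([2, 10, 4])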
class OmDetTurboResidualLayer(nn.Module):
    """
    A residual connection followed by a layer norm.
    """

    def __init__(self, config):
        super().__init__()
        self.norm1 = nn.LayerNorm(config.class_embed_dim, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.decoder_dropout)

    def forward(self, x, y):
        return self.norm1(x + self.dropout(y))
class_definition
43,093
43,482
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
null
3,399