Dataset columns (each record below follows this field order: the `text` cell, i.e. the extracted class source, followed by the remaining fields on one line):
text: string (lengths 31 to 243k)
type: string (1 distinct value)
start: int64 (36 to 275k)
end: int64 (286 to 280k)
depth: int64 (0 to 1)
filepath: string (lengths 85 to 188)
parent_class: string (3 distinct values)
class_index: int64 (0 to 10.8k)
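If these rows are served as a Hugging Face `datasets` table (an assumption based on the column summary above, not stated in the dump), a single record could be inspected as in the sketch below; the repo id is a hypothetical placeholder.

```python
# Minimal sketch, assuming the rows live in a Hugging Face `datasets` table.
# The repo id below is a hypothetical placeholder, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("username/transformers-class-definitions", split="train")  # hypothetical id

record = ds[0]
print(record["type"])                   # e.g. "class_definition"
print(record["filepath"])               # source file the snippet was extracted from
print(record["start"], record["end"])   # character offsets of the class inside that file
print(record["class_index"])            # running index of the class in the dataset
print(record["text"][:120])             # beginning of the raw class source
```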
class Kosmos2TextModel(Kosmos2PreTrainedModel): config_class = Kosmos2TextConfig def __init__(self, config: Kosmos2TextConfig): super().__init__(config) self.model = Kosmos2TextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(KOSMOS2_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=Kosmos2TextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Returns: """ return self.model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
type: class_definition | start: 72,745 | end: 75,099 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/kosmos2/modeling_kosmos2.py | parent_class: null | class_index: 6,100
class Kosmos2TextForCausalLM(Kosmos2PreTrainedModel, GenerationMixin): config_class = Kosmos2TextConfig _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: Kosmos2TextConfig): super().__init__(config) self.model = Kosmos2TextTransformer(config) self.lm_head = nn.Linear(in_features=config.embed_dim, out_features=config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self) -> nn.Module: return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(KOSMOS2_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=Kosmos2TextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() batch_size, seq_length, vocab_size = shift_logits.shape # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct( shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length) ) if not return_dict: output = (lm_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, image_embeds=None, image_embeds_position_mask=None, past_key_values=None, attention_mask=None, use_cache=None, **model_kwargs, ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) position_ids = None # cut input_ids if past_key_values is used if past_key_values is not None: position_ids = create_position_ids_from_input_ids( input_ids, padding_idx=self.config.pad_token_id, past_key_values_length=0, )[:, -1:] input_ids = input_ids[:, -1:] # the image info. 
is already encoded into the past keys/values image_embeds = None image_embeds_position_mask = None elif image_embeds_position_mask is not None: # appending `False` to `image_embeds_position_mask` (because `input_ids` grows during generation) batch_size, seq_len = input_ids.size() mask_len = image_embeds_position_mask.size()[-1] image_embeds_position_mask = torch.cat( ( image_embeds_position_mask, torch.zeros(size=(batch_size, seq_len - mask_len), dtype=torch.bool, device=input_ids.device), ), dim=1, ) return { "input_ids": input_ids, "image_embeds": image_embeds, "image_embeds_position_mask": image_embeds_position_mask, "past_key_values": past_key_values, "attention_mask": attention_mask, "position_ids": position_ids, "use_cache": use_cache, } @staticmethod # Copied from transformers.models.umt5.modeling_umt5.UMT5ForConditionalGeneration._reorder_cache def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
type: class_definition | start: 75,305 | end: 82,396 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/kosmos2/modeling_kosmos2.py | parent_class: null | class_index: 6,101
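The causal LM head in the record above computes a shift-by-one next-token cross-entropy loss. A standalone sketch of that pattern in plain PyTorch (shapes are illustrative, not taken from the model):

```python
import torch
from torch.nn import CrossEntropyLoss

# Illustrative shapes: batch_size=2, seq_length=5, vocab_size=11.
lm_logits = torch.randn(2, 5, 11)
labels = torch.randint(0, 11, (2, 5))

# Tokens < n predict token n: drop the last logit and the first label.
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()

batch_size, seq_length, vocab_size = shift_logits.shape
loss_fct = CrossEntropyLoss()  # positions labelled -100 are ignored by default
loss = loss_fct(
    shift_logits.view(batch_size * seq_length, vocab_size),
    shift_labels.view(batch_size * seq_length),
)
print(loss.item())
```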
class Kosmos2ImageToTextProjection(nn.Module): """The layer that transforms the image model's output to part of the text model's input (namely, image features)""" def __init__(self, config: Kosmos2Config): super().__init__() self.dense = nn.Linear(config.vision_config.hidden_size, config.text_config.embed_dim) self.latent_query = nn.Parameter(torch.randn(config.latent_query_num, config.text_config.embed_dim)) self.x_attn = KosmosTextAttention( config.text_config, config.text_config.embed_dim, config.text_config.attention_heads, dropout=config.text_config.attention_dropout, is_decoder=False, add_inner_attn_layernorm=False, ) def forward(self, features): hidden_states = self.dense(features) # shape = [batch, latent_query_num, h_dim] latent_query = self.latent_query.unsqueeze(0).expand(hidden_states.size(0), -1, -1) key_value_states = torch.cat([hidden_states, latent_query], dim=1) hidden_states, attn_weights, _ = self.x_attn( hidden_states=latent_query, encoder_hidden_states=key_value_states, past_key_value=None, attention_mask=None, output_attentions=None, ) return hidden_states, attn_weights
type: class_definition | start: 82,399 | end: 83,753 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/kosmos2/modeling_kosmos2.py | parent_class: null | class_index: 6,102
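`Kosmos2ImageToTextProjection` above maps a variable-length sequence of vision features to a fixed set of `latent_query_num` image tokens via cross-attention. A shape-flow sketch, with `nn.MultiheadAttention` standing in for the internal `KosmosTextAttention` and dimensions borrowed from the default configs (an approximation, not the real module):

```python
import torch
from torch import nn

# Dimensions follow the default configs in this dump; the attention module is a stand-in.
vision_hidden, embed_dim, latent_query_num, num_heads = 1024, 2048, 64, 32

dense = nn.Linear(vision_hidden, embed_dim)
latent_query = nn.Parameter(torch.randn(latent_query_num, embed_dim))
x_attn = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)

features = torch.randn(1, 257, vision_hidden)             # [batch, vision_seq, vision_hidden]
hidden_states = dense(features)                           # [batch, vision_seq, embed_dim]
queries = latent_query.unsqueeze(0).expand(hidden_states.size(0), -1, -1)
key_value_states = torch.cat([hidden_states, queries], dim=1)

out, attn_weights = x_attn(queries, key_value_states, key_value_states)
print(out.shape)  # torch.Size([1, 64, 2048]): a fixed number of image tokens for the text model
```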
class Kosmos2Model(Kosmos2PreTrainedModel): config_class = Kosmos2Config main_input_name = "pixel_values" def __init__(self, config: Kosmos2Config): super().__init__(config) self.text_model = Kosmos2TextModel(config.text_config) self.vision_model = Kosmos2VisionModel(config.vision_config) self.image_to_text_projection = Kosmos2ImageToTextProjection(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.model.embed_tokens def set_input_embeddings(self, value): self.text_model.model.embed_tokens = value @add_start_docstrings_to_model_forward(KOSMOS2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Kosmos2ModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, input_ids: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, image_embeds: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, Kosmos2ModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Kosmos2Model >>> model = Kosmos2Model.from_pretrained("microsoft/kosmos-2-patch14-224") >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = ( ... "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863>" ... "</object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911>" ... "</object>" ... ) >>> inputs = processor(text=text, images=image, return_tensors="pt", add_eos_token=True) >>> last_hidden_state = model( ... pixel_values=inputs["pixel_values"], ... input_ids=inputs["input_ids"], ... attention_mask=inputs["attention_mask"], ... image_embeds_position_mask=inputs["image_embeds_position_mask"], ... ).last_hidden_state >>> list(last_hidden_state.shape) [1, 91, 2048] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_model_output = None projection_attentions = None if image_embeds is None: if pixel_values is None: raise ValueError("You have to specify either `pixel_values` or `image_embeds`.") vision_model_output = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. 
image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) # normalized features image_embeds = nn.functional.normalize(image_embeds, dim=-1) image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: outputs = outputs + (image_embeds, projection_attentions, vision_model_output) return tuple(output for output in outputs if output is not None) return Kosmos2ModelOutput( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_embeds=image_embeds, projection_attentions=projection_attentions, vision_model_output=vision_model_output, )
type: class_definition | start: 83,947 | end: 89,393 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/kosmos2/modeling_kosmos2.py | parent_class: null | class_index: 6,103
class Kosmos2ForConditionalGeneration(Kosmos2PreTrainedModel, GenerationMixin): config_class = Kosmos2Config main_input_name = "pixel_values" _tied_weights_keys = ["text_model.lm_head.weight"] def __init__(self, config: Kosmos2Config): super().__init__(config) self.text_model = Kosmos2TextForCausalLM(config.text_config) self.vision_model = Kosmos2VisionModel(config.vision_config) self.image_to_text_projection = Kosmos2ImageToTextProjection(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.model.embed_tokens def set_input_embeddings(self, value): self.text_model.model.embed_tokens = value def get_output_embeddings(self) -> nn.Module: return self.text_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.text_model.set_output_embeddings(new_embeddings) @add_start_docstrings_to_model_forward(KOSMOS2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Kosmos2ForConditionalGenerationModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, input_ids: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, image_embeds: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Kosmos2ForConditionalGenerationModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Kosmos2ForConditionalGeneration >>> model = Kosmos2ForConditionalGeneration.from_pretrained("microsoft/kosmos-2-patch14-224") >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> prompt = "<grounding> An image of" >>> inputs = processor(text=prompt, images=image, return_tensors="pt") >>> generated_ids = model.generate( ... pixel_values=inputs["pixel_values"], ... input_ids=inputs["input_ids"], ... attention_mask=inputs["attention_mask"], ... image_embeds=None, ... image_embeds_position_mask=inputs["image_embeds_position_mask"], ... use_cache=True, ... max_new_tokens=64, ... ) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False) >>> processed_text '<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>.' 
>>> caption, entities = processor.post_process_generation(generated_text) >>> caption 'An image of a snowman warming himself by a fire.' >>> entities [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_model_output = None projection_attentions = None if image_embeds is None: if pixel_values is None: raise ValueError("You have to specify either `pixel_values` or `image_embeds`.") vision_model_output = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) # normalized features image_embeds = nn.functional.normalize(image_embeds, dim=-1) image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) lm_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, position_ids=position_ids, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: outputs = lm_outputs + (image_embeds, projection_attentions, vision_model_output) return tuple(output for output in outputs if output is not None) return Kosmos2ForConditionalGenerationModelOutput( loss=lm_outputs.loss, logits=lm_outputs.logits, past_key_values=lm_outputs.past_key_values, hidden_states=lm_outputs.hidden_states, attentions=lm_outputs.attentions, image_embeds=image_embeds, projection_attentions=projection_attentions, vision_model_output=vision_model_output, ) def generate( self, pixel_values: Optional[torch.Tensor] = None, image_embeds_position_mask: Optional[torch.Tensor] = None, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, **kwargs, ): # in order to allow `inputs` argument (as in `GenerationMixin`) inputs = kwargs.pop("inputs", None) if pixel_values is not None and inputs is not None: raise ValueError( f"`inputs`: {inputs} were passed alongside `pixel_values` which is not allowed." f"Make sure to either pass `inputs` or pixel_values=..." ) if pixel_values is None and inputs is not None: pixel_values = inputs if image_embeds is None: vision_model_output = self.vision_model(pixel_values) # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`. image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0]) # normalized features image_embeds = nn.functional.normalize(image_embeds, dim=-1) image_embeds, projection_attentions = self.image_to_text_projection(image_embeds) output = self.text_model.generate( input_ids=input_ids, attention_mask=attention_mask, image_embeds=image_embeds, image_embeds_position_mask=image_embeds_position_mask, **kwargs, ) return output
type: class_definition | start: 89,606 | end: 98,036 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/kosmos2/modeling_kosmos2.py | parent_class: null | class_index: 6,104
class Kosmos2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2TextModel`]. It is used to instantiate a KOSMOS-2 text decoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the text decoder of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65037): Vocabulary size of the Kosmos2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Kosmos2Model`]. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). embed_dim (`int`, *optional*, defaults to 2048): Dimensionality of the layers and the pooler layer. layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. ffn_dim (`int`, *optional*, defaults to 8192): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_embedding (`bool`, *optional*, defaults to `True`): Scale embeddings by diving by sqrt(embed_dim). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). pad_token_id (`int`, *optional*, defaults to 1): Token id used for padding. bos_token_id (`int`, *optional*, defaults to 0): Token id used for beginning of string. eos_token_id (`int`, *optional*, defaults to 2): Token id used for end of string. 
```""" model_type = "kosmos_2_text_model" base_config_key = "text_config" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "attention_heads", "hidden_size": "embed_dim", "num_hidden_layers": "layers", } def __init__( self, vocab_size=65037, max_position_embeddings=2048, embed_dim=2048, layers=24, ffn_dim=8192, attention_heads=32, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, layer_norm_eps=1e-5, init_std=0.02, scale_embedding=True, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.embed_dim = embed_dim self.layers = layers self.ffn_dim = ffn_dim self.attention_heads = attention_heads self.activation_function = activation_function self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.init_std = init_std self.scale_embedding = scale_embedding self.use_cache = use_cache
type: class_definition | start: 808 | end: 5,981 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/kosmos2/configuration_kosmos2.py | parent_class: null | class_index: 6,105
class Kosmos2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2VisionModel`]. It is used to instantiate a KOSMOS-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the vision encoder of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). ```""" model_type = "kosmos_2_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=1024, intermediate_size=4096, num_hidden_layers=24, num_attention_heads=16, num_channels=3, image_size=224, patch_size=14, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act
type: class_definition | start: 5,984 | end: 9,575 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/kosmos2/configuration_kosmos2.py | parent_class: null | class_index: 6,106
class Kosmos2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2Model`]. It is used to instantiate a KOSMOS-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Kosmos2TextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Kosmos2VisionConfig`]. latent_query_num (`int`, *optional*, defaults to 64): The number of latent query tokens that represent the image features used in the text decoder component. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import Kosmos2Config, Kosmos2Model >>> # Initializing a Kosmos-2 kosmos-2-patch14-224 style configuration >>> configuration = Kosmos2Config() >>> # Initializing a model (with random weights) from the kosmos-2-patch14-224 style configuration >>> model = Kosmos2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "kosmos-2" sub_configs = {"text_config": Kosmos2TextConfig, "vision_config": Kosmos2VisionConfig} def __init__( self, text_config=None, vision_config=None, latent_query_num=64, **kwargs, ): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `Kosmos2TextConfig` with default values.") if vision_config is None: vision_config = {} logger.info("`vision_config` is `None`. Initializing the `Kosmos2VisionConfig` with default values.") self.text_config = Kosmos2TextConfig(**text_config) self.vision_config = Kosmos2VisionConfig(**vision_config) self.latent_query_num = latent_query_num
type: class_definition | start: 9,578 | end: 11,849 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/kosmos2/configuration_kosmos2.py | parent_class: null | class_index: 6,107
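Per the `__init__` in the record above, `Kosmos2Config` accepts plain dicts for its sub-configs. A small sketch overriding a few fields (values are illustrative; field names come from the text and vision config classes above):

```python
from transformers import Kosmos2Config, Kosmos2Model

# Illustrative overrides of the default sub-configs.
config = Kosmos2Config(
    text_config={"embed_dim": 1024, "layers": 12, "attention_heads": 16},
    vision_config={"hidden_size": 768, "num_hidden_layers": 12},
    latent_query_num=64,
)
print(config.text_config.embed_dim, config.vision_config.hidden_size)

model = Kosmos2Model(config)  # randomly initialized, smaller variant
```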
class TFBlipTextEmbeddings(keras.layers.Layer): """Construct the embeddings from word and position embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.word_embeddings = keras.layers.Embedding( config.vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="word_embeddings", ) self.position_embeddings = keras.layers.Embedding( config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="position_embeddings", ) # self.LayerNorm is not snake-cased to stick with PyTorch model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") self.position_ids = tf.expand_dims(tf.range(config.max_position_embeddings), 0) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.config = config def call(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0, training=None): if input_ids is not None: input_shape = tf.shape(input_ids) else: input_shape = tf.shape(inputs_embeds)[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "word_embeddings", None) is not None: with tf.name_scope(self.word_embeddings.name): self.word_embeddings.build(None) if getattr(self, "position_embeddings", None) is not None: with tf.name_scope(self.position_embeddings.name): self.position_embeddings.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None)
type: class_definition | start: 3,100 | end: 6,120 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,108
class TFBlipTextSelfAttention(keras.layers.Layer): def __init__(self, config, is_cross_attention, **kwargs): super().__init__(**kwargs) self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = keras.layers.Embedding( 2 * config.max_position_embeddings - 1, self.attention_head_size ) self.is_cross_attention = is_cross_attention def transpose_for_scores(self, x): new_x_shape = tf.concat( [tf.shape(x)[:-1], tf.constant([self.num_attention_heads, self.attention_head_size], dtype=tf.int32)], axis=0, ) x = tf.reshape(x, new_x_shape) return tf.transpose(x, perm=(0, 2, 1, 3)) def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=None, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = shape_list(hidden_states)[1] position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64, device=hidden_states.device), 1) position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64, device=hidden_states.device), 0) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = tf.cast(positional_embedding, query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = tf.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BlipTextModel forward() function) attention_scores = attention_scores + tf.cast(attention_mask, attention_scores.dtype) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs_dropped = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = attention_probs_dropped @ value_layer context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3)) new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size] context_layer = tf.reshape(context_layer, new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if self.is_cross_attention: if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.encoder_hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.encoder_hidden_size]) else: if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size])
type: class_definition | start: 6,201 | end: 13,642 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,109
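The `relative_key` branch in the attention layer above scores attention with a learned embedding of query/key position distances. A standalone TensorFlow sketch of that computation (sizes are illustrative):

```python
import tensorflow as tf

# Illustrative sizes; mirrors the "relative_key" scoring in the snippet above.
seq_length, max_position_embeddings, head_size = 4, 8, 16
distance_embedding = tf.keras.layers.Embedding(2 * max_position_embeddings - 1, head_size)

position_ids_l = tf.expand_dims(tf.range(seq_length), 1)   # [seq, 1]
position_ids_r = tf.expand_dims(tf.range(seq_length), 0)   # [1, seq]
distance = position_ids_l - position_ids_r                  # values in [-(seq-1), seq-1]
positional_embedding = distance_embedding(distance + max_position_embeddings - 1)  # [seq, seq, head]

query_layer = tf.random.normal((1, 2, seq_length, head_size))  # [batch, heads, seq, head]
relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
print(relative_position_scores.shape)  # (1, 2, 4, 4), added to the raw attention scores
```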
class TFBlipTextSelfOutput(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size])
type: class_definition | start: 13,645 | end: 14,989 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,110
class TFBlipTextAttention(keras.layers.Layer): def __init__(self, config, is_cross_attention=False, **kwargs): super().__init__(**kwargs) self.self = TFBlipTextSelfAttention(config, is_cross_attention, name="self") # "output" is a protected attribute on TF models self.self_output = TFBlipTextSelfOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, output_attentions: Optional[bool] = False, training: Optional[bool] = None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training=training, ) attention_output = self.self_output(self_outputs[0], hidden_states, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "self_output", None) is not None: with tf.name_scope(self.self_output.name): self.self_output.build(None)
type: class_definition | start: 15,070 | end: 16,762 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,111
class TFBlipTextIntermediate(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size])
type: class_definition | start: 16,860 | end: 17,890 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,112
class TFBlipTextOutput(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size])
type: class_definition | start: 17,893 | end: 19,230 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,113
class TFBlipTextLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.attention = TFBlipTextAttention(config, name="attention") if self.config.is_decoder: self.crossattention = TFBlipTextAttention( config, is_cross_attention=self.config.is_decoder, name="crossattention" ) self.intermediate = TFBlipTextIntermediate(config, name="intermediate") self.self_output = TFBlipTextOutput(config, name="output") def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=None, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, training=training, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.self_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "self_output", None) is not None: with tf.name_scope(self.self_output.name): self.self_output.build(None) if getattr(self, "crossattention", None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None)
type: class_definition | start: 19,233 | end: 22,262 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,114
class TFBlipTextEncoder(keras.layers.Layer): config_class = BlipTextConfig def __init__(self, config, name=None, **kwargs): super().__init__(name=name, **kwargs) self.config = config self.layer = [TFBlipTextLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] @unpack_inputs def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, training=None, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.is_decoder else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None)
type: class_definition | start: 22,364 | end: 25,421 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,115
class TFBlipTextPooler(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size])
type: class_definition | start: 25,513 | end: 26,490 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,116
class TFBlipTextPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size])
type: class_definition | start: 26,599 | end: 28,004 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,117
class TFBlipTextLMPredictionHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFBlipTextPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = keras.layers.Dense( config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="decoder", use_bias=False, ) self.config = config def build(self, input_shape=None): self.bias = self.add_weight(name="bias", shape=(self.config.vocab_size,), initializer="zeros", trainable=True) if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build([None, None, self.config.hidden_size]) def call(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states
type: class_definition | start: 28,007 | end: 29,336 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,118
class TFBlipTextOnlyMLMHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.predictions = TFBlipTextLMPredictionHead(config, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None)
type: class_definition | start: 29,339 | end: 29,970 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,119
class TFBlipTextPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BlipTextConfig base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"]
type: class_definition | start: 30,052 | end: 30,370 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py | parent_class: null | class_index: 6,120
class TFBlipTextModel(TFBlipTextPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. argument and `is_decoder` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=True, name=None, **kwargs): super().__init__(config, name=name, **kwargs) self.config = config self.embeddings = TFBlipTextEmbeddings(config, name="embeddings") self.encoder = TFBlipTextEncoder(config, name="encoder") self.pooler = TFBlipTextPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @tf.function def get_extended_attention_mask( self, attention_mask: tf.Tensor, input_shape: Tuple[int], is_decoder: bool ) -> tf.Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`tf.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`Tuple[int]`): The shape of the input to the model. is_decoder (`bool`): Whether the model is used as a decoder. Returns: `tf.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if not isinstance(attention_mask, tf.Tensor): attention_mask = tf.convert_to_tensor(attention_mask) # Catches NumPy inputs that haven't been cast yet if attention_mask.shape.rank == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.shape.rank == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = tf.range(seq_length, dtype=attention_mask.dtype) causal_mask = tf.broadcast_to(seq_ids, (batch_size, seq_length, seq_length)) <= seq_ids[None, :, None] # in case past_key_values are used we need to add a prefix ones mask to the causal mask if shape_list(causal_mask)[1] < shape_list(attention_mask)[1]: prefix_seq_len = tf.shape(attention_mask)[1] - tf.shape(causal_mask)[1] causal_mask = tf.concat( [ tf.ones((batch_size, seq_length, prefix_seq_len), dtype=causal_mask.dtype), causal_mask, ], axis=-1, ) extended_attention_mask = ( tf.cast(causal_mask[:, None, :, :], attention_mask.dtype) * attention_mask[:, None, None, :] ) else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. 
# Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING) @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, encoder_embeds: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, is_decoder: bool = False, training: bool = False, ) -> Tuple[tf.Tensor] | TFBaseModelOutputWithPoolingAndCrossAttentions: r""" encoder_hidden_states (`tf.Tensor`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] batch_size, seq_length = input_shape elif encoder_embeds is not None: input_shape = shape_list(encoder_embeds)[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = tf.ones(((batch_size, seq_length + past_key_values_length))) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: tf.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, is_decoder) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states[0]) else: encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states) encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = tf.ones(encoder_hidden_shape) encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if encoder_embeds is None: embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) else: embedding_output = encoder_embeds encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else 
None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None)
class_definition
30,488
42,796
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py
null
6,121
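A minimal NumPy sketch of the masking arithmetic in `get_extended_attention_mask` above, with illustrative batch, sequence, and prefix lengths; this is not the library's internal code, just the same causal-plus-prefix logic spelled out.

```python
# Standalone sketch of the decoder masking logic (illustrative values only).
import numpy as np

batch_size, seq_length, prefix_len = 2, 4, 3          # prefix_len mimics cached past_key_values
attention_mask = np.ones((batch_size, seq_length + prefix_len), dtype=np.float32)

seq_ids = np.arange(seq_length)
# Lower-triangular causal mask: position i may attend to positions j <= i.
causal = (seq_ids[None, :, None] >= seq_ids[None, None, :]).astype(np.float32)
causal = np.broadcast_to(causal, (batch_size, seq_length, seq_length))

# Prepend ones so new tokens can attend to every cached (prefix) position.
causal = np.concatenate(
    [np.ones((batch_size, seq_length, prefix_len), dtype=np.float32), causal], axis=-1
)

# Combine with the padding mask and turn it into an additive bias for the attention scores.
extended = causal[:, None, :, :] * attention_mask[:, None, None, :]
extended = (1.0 - extended) * -10000.0
print(extended.shape)  # (2, 1, 4, 7)
```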
class TFBlipTextLMHeadModel(TFBlipTextPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.bert = TFBlipTextModel(config, add_pooling_layer=False, name="bert") self.cls = TFBlipTextOnlyMLMHead(config, name="cls") self.label_smoothing = config.label_smoothing def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING) @unpack_inputs def call( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, return_logits=False, is_decoder=True, training=None, ): r""" encoder_hidden_states (`tf.Tensor`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`tf.Tensor`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(tf.Tensor))`, *optional*): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, training=training, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores[:, :-1, :] lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :] shifted_prediction_scores = tf.reshape(shifted_prediction_scores, (-1, self.config.vocab_size)) labels = labels[:, 1:] labels = tf.reshape(labels, (-1,)) # Keras won't give us label smoothing for sparse CE, so we de-sparsify things here # Use relu to clamp masked labels at 0 to avoid NaN (we will be zeroing those out later anyway) one_hot_labels = tf.one_hot(tf.nn.relu(labels), depth=self.config.vocab_size, dtype=tf.float32) loss_fct = keras.losses.CategoricalCrossentropy( from_logits=True, label_smoothing=self.label_smoothing, reduction="none" ) masked_positions = tf.cast(tf.not_equal(labels, -100), dtype=tf.float32) lm_loss = loss_fct(one_hot_labels, shifted_prediction_scores) lm_loss *= masked_positions lm_loss = tf.reduce_sum(lm_loss, axis=0) / tf.math.count_nonzero(masked_positions, dtype=tf.float32) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past_key_values is used if past_key_values is not None: input_ids = input_ids[:, -1:] return { "input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values, "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), "is_decoder": True, } def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "bert", None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, "cls", None) is not None: with tf.name_scope(self.cls.name): self.cls.build(None)
class_definition
42,878
49,971
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip_text.py
null
6,122
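The shifted, label-smoothed language-modeling loss computed in `call` above can be reproduced in isolation. The snippet below is a toy sketch with made-up shapes and an illustrative `label_smoothing` value; `-100` marks positions excluded from the loss.

```python
import tensorflow as tf

vocab_size, label_smoothing = 11, 0.1
prediction_scores = tf.random.normal((2, 5, vocab_size))           # (batch, seq, vocab)
labels = tf.constant([[3, 7, 1, -100, -100], [2, 2, 9, 4, -100]])  # -100 = ignored position

# Next-token prediction: drop the last logit and the first label.
shifted_logits = tf.reshape(prediction_scores[:, :-1, :], (-1, vocab_size))
shifted_labels = tf.reshape(labels[:, 1:], (-1,))

# De-sparsify so Keras can apply label smoothing; relu clamps the -100 sentinels to 0.
one_hot = tf.one_hot(tf.nn.relu(shifted_labels), depth=vocab_size, dtype=tf.float32)
loss_fct = tf.keras.losses.CategoricalCrossentropy(
    from_logits=True, label_smoothing=label_smoothing, reduction="none"
)
per_token = loss_fct(one_hot, shifted_logits)

# Zero out ignored positions and average over the rest.
mask = tf.cast(tf.not_equal(shifted_labels, -100), tf.float32)
lm_loss = tf.reduce_sum(per_token * mask) / tf.math.count_nonzero(mask, dtype=tf.float32)
print(float(lm_loss))
```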
class BlipProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        "text_kwargs": {
            "add_special_tokens": True,
            "padding": False,
            "stride": 0,
            "return_overflowing_tokens": False,
            "return_special_tokens_mask": False,
            "return_offsets_mapping": False,
            "return_token_type_ids": False,
            "return_length": False,
            "verbose": True,
        },
        "images_kwargs": {},
    }
class_definition
878
1,370
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/processing_blip.py
null
6,123
class BlipProcessor(ProcessorMixin):
    r"""
    Constructs a BLIP processor which wraps a BERT tokenizer and BLIP image processor into a single processor.

    [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`BertTokenizerFast`]. See the
    docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.

    Args:
        image_processor (`BlipImageProcessor`):
            An instance of [`BlipImageProcessor`]. The image processor is a required input.
        tokenizer (`BertTokenizerFast`):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    valid_kwargs = []
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer, **kwargs):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[BlipProcessorKwargs],
    ) -> BatchEncoding:
        """
        This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
        [`BertTokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information.

        Args:
            images (`ImageInput`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                    - `'tf'`: Return TensorFlow `tf.constant` objects.
                    - `'pt'`: Return PyTorch `torch.Tensor` objects.
                    - `'np'`: Return NumPy `np.ndarray` objects.
                    - `'jax'`: Return JAX `jnp.ndarray` objects.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        text_encoding = None

        # add pixel_values encoding. If we also have text_encoding, update image encoding and return it.
        # else, return the text encoding.
        output_kwargs = self._merge_kwargs(
            BlipProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        if text is not None:
            text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"])

        if images is not None:
            encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"])

            if text_encoding is not None:
                encoding_image_processor.update(text_encoding)

            return encoding_image_processor

        return text_encoding

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
class_definition
1,373
5,866
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/processing_blip.py
null
6,124
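As a usage illustration for the processor above, a single image and question can be prepared in one call. This assumes the `Salesforce/blip-vqa-base` checkpoint referenced in the BLIP configs is reachable; the image URL and prompt are placeholders.

```python
import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example COCO image
image = Image.open(requests.get(url, stream=True).raw)

# Joint image + text preparation; token_type_ids are disabled by the processor.
inputs = processor(images=image, text="how many cats are on the couch?", return_tensors="pt")
print(inputs.keys())  # pixel_values plus the tokenizer outputs (input_ids, attention_mask)
```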
class BlipImageProcessor(BaseImageProcessor): r""" Constructs a BLIP image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 384, "width": 384} size = get_size_dict(size, default_to_square=True) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. The shortest edge of the image is resized to `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest edge equal to `int(size["shortest_edge"] * (1333 / 800))`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. 
If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs
class_definition
1,374
15,224
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/image_processing_blip.py
null
6,125
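A short usage sketch of the image processor above with its default 384x384 configuration; the random array below is a stand-in for a real image.

```python
import numpy as np
from transformers import BlipImageProcessor

image_processor = BlipImageProcessor()  # do_resize=True, size={"height": 384, "width": 384}
fake_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # HWC, values in [0, 255]

# Resize -> rescale by 1/255 -> normalize, returned channels-first by default.
batch = image_processor(images=fake_image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384)
```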
class BlipTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BlipTextModel`]. It is used to instantiate a
    BLIP text model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the `BlipText` used by the [base
    architectures](https://huggingface.co/Salesforce/blip-vqa-base).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30524):
            Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`BlipModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        encoder_hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers from the vision model.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        bos_token_id (`int`, *optional*, defaults to 30522):
            The id of the `beginning-of-sequence` token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the `end-of-sequence` token.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the `padding` token.
        sep_token_id (`int`, *optional*, defaults to 102):
            The id of the `separator` token.
        is_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as a decoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        label_smoothing (`float`, *optional*, defaults to 0.0):
            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no
            smoothing. The targets become a mixture of the original ground truth and a uniform distribution as
            described in `Rethinking the Inception Architecture for Computer Vision
            <https://arxiv.org/abs/1512.00567>`__.

    Example:

    ```python
    >>> from transformers import BlipTextConfig, BlipTextModel

    >>> # Initializing a BlipTextConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipTextConfig()

    >>> # Initializing a BlipTextModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "blip_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        label_smoothing=0.0,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
        self.label_smoothing = label_smoothing
class_definition
781
6,816
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/configuration_blip.py
null
6,126
class BlipVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BlipVisionModel`]. It is used to instantiate a
    BLIP vision model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Blip-base
    [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_size (`int`, *optional*, defaults to 384):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import BlipVisionConfig, BlipVisionModel

    >>> # Initializing a BlipVisionConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipVisionConfig()

    >>> # Initializing a BlipVisionModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "blip_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
class_definition
6,819
10,433
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/configuration_blip.py
null
6,127
class BlipConfig(PretrainedConfig):
    r"""
    [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to instantiate
    a BLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the BLIP-base
    [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BlipTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BlipVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original BLIP implementation.
        image_text_hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden state of the image-text fusion layer.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no
            smoothing. The targets become a mixture of the original ground truth and a uniform distribution as
            described in `Rethinking the Inception Architecture for Computer Vision
            <https://arxiv.org/abs/1512.00567>`__.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import BlipConfig, BlipModel

    >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipConfig()

    >>> # Initializing a BlipModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig

    >>> # Initializing a BLIPText and BLIPVision configuration
    >>> config_text = BlipTextConfig()
    >>> config_vision = BlipVisionConfig()

    >>> config = BlipConfig.from_text_vision_configs(config_text, config_vision)
    ```"""

    model_type = "blip"
    sub_configs = {"text_config": BlipTextConfig, "vision_config": BlipVisionConfig}

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        label_smoothing=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
        self.label_smoothing = label_smoothing

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        r"""
        Instantiate a [`BlipConfig`] (or a derived class) from blip text model configuration and blip vision model
        configuration.

        Returns:
            [`BlipConfig`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
class_definition
10,436
14,828
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/configuration_blip.py
null
6,128
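A small sketch of the sub-config composition above, highlighting the constructor line that copies the vision hidden size into `text_config.encoder_hidden_size`; the sizes are illustrative.

```python
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_config = BlipTextConfig(hidden_size=512, num_hidden_layers=6)
vision_config = BlipVisionConfig(hidden_size=1024, num_attention_heads=16, num_hidden_layers=6)

config = BlipConfig.from_text_vision_configs(text_config, vision_config, projection_dim=256)

# The cross-attention width of the text model is tied to the vision hidden size.
print(config.text_config.encoder_hidden_size)  # 1024
```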
class TFBlipForConditionalGenerationModelOutput(ModelOutput):
    """
    Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
    last hidden states. This class also adds the loss term from the text decoder.

    Args:
        loss (`tf.Tensor`, *optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
            Language modeling loss from the text decoder.
        logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
            Prediction scores of the language modeling head of the text decoder model.
        image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*):
            The image embeddings obtained after applying the Vision Transformer model to the input image.
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
            the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the
            model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    loss: Tuple[tf.Tensor] | None = None
    logits: Tuple[tf.Tensor] | None = None
    image_embeds: tf.Tensor | None = None
    last_hidden_state: tf.Tensor = None
    hidden_states: Tuple[tf.Tensor, ...] | None = None
    attentions: Tuple[tf.Tensor, ...] | None = None

    @property
    def decoder_logits(self):
        warnings.warn(
            "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
            " Please use the `logits` attribute to retrieve the final output instead.",
            FutureWarning,
        )
        return self.logits
class_definition
2,211
4,632
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,129
class TFBlipTextVisionModelOutput(ModelOutput):
    """
    Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
    last hidden states. This class also adds the loss term from the text decoder.

    Args:
        loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss from the text decoder.
        image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized
            with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when
            `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
            the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the
            model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when
            `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    loss: tf.Tensor | None = None
    image_embeds: tf.Tensor | None = None
    last_hidden_state: tf.Tensor = None
    hidden_states: Tuple[tf.Tensor, ...] | None = None
    attentions: Tuple[tf.Tensor, ...] | None = None
class_definition
4,646
6,609
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,130
class TFBlipImageTextMatchingModelOutput(ModelOutput):
    """
    Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
    last hidden states. This class also adds the loss term from the text decoder as well as the image-text similarity
    scores.

    Args:
        itm_score (`tf.Tensor`):
            The image-text similarity scores.
        loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss from the text decoder.
        image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized
            with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when
            `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
            the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the
            model at the output of each layer plus the optional initial embedding outputs.
        vision_pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`, *optional*):
            Last layer hidden-state of the vision-only branch of the model.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when
            `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
        question_embeds (`tf.Tensor`):
            The question embeddings obtained by the text projection layer.
    """

    itm_score: tf.Tensor | None = None
    loss: tf.Tensor | None = None
    image_embeds: tf.Tensor | None = None
    last_hidden_state: tf.Tensor = None
    hidden_states: Tuple[tf.Tensor, ...] | None = None
    vision_pooler_output: tf.Tensor | None = None
    attentions: Tuple[tf.Tensor, ...] | None = None
    question_embeds: Tuple[tf.Tensor] | None = None
class_definition
6,623
9,158
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,131
class TFBlipOutput(ModelOutput):
    """
    Args:
        loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
        image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
            The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
        text_model_output (`BaseModelOutputWithPooling`):
            The output of the [`BlipTextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`BlipVisionModel`].
    """

    loss: tf.Tensor | None = None
    logits_per_image: tf.Tensor = None
    logits_per_text: tf.Tensor = None
    text_embeds: tf.Tensor = None
    image_embeds: tf.Tensor = None
    text_model_output: TFBaseModelOutputWithPooling = None
    vision_model_output: TFBaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
class_definition
9,172
10,953
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,132
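The two logits fields documented above are transposes of each other: scaled dot products between the projected text and image embeddings. A hedged sketch with random embeddings, assuming a CLIP-style exponentiated logit scale (the `logit_scale_init_value` documented in `BlipConfig`):

```python
import tensorflow as tf

image_embeds = tf.math.l2_normalize(tf.random.normal((4, 256)), axis=-1)
text_embeds = tf.math.l2_normalize(tf.random.normal((6, 256)), axis=-1)
logit_scale = tf.exp(tf.constant(2.6592))  # illustrative, from logit_scale_init_value

# Text-image similarity matrix and its transpose.
logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
logits_per_image = tf.transpose(logits_per_text)
print(logits_per_image.shape, logits_per_text.shape)  # (4, 6) (6, 4)
```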
class TFBlipVisionEmbeddings(keras.layers.Layer):
    def __init__(self, config: BlipVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.patch_embedding = keras.layers.Conv2D(
            filters=self.embed_dim,
            kernel_size=self.patch_size,
            strides=self.patch_size,
            kernel_initializer=get_initializer(self.config.initializer_range),
            data_format="channels_last",
            name="patch_embedding",
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

    def build(self, input_shape=None):
        self.class_embedding = self.add_weight(
            shape=(1, 1, self.embed_dim),
            initializer=get_initializer(self.config.initializer_range),
            trainable=True,
            name="class_embedding",
        )

        self.position_embedding = self.add_weight(
            shape=(1, self.num_positions, self.embed_dim),
            initializer=get_initializer(self.config.initializer_range),
            trainable=True,
            name="position_embedding",
        )

        if self.built:
            return
        self.built = True
        if getattr(self, "patch_embedding", None) is not None:
            with tf.name_scope(self.patch_embedding.name):
                self.patch_embedding.build([None, None, None, 3])

    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        # Input is channels-first, we transpose. PyTorch transposes after the conv because PyTorch
        # likes channels-first convs.
        batch_size = tf.shape(pixel_values)[0]
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        patch_embeds = self.patch_embedding(pixel_values)
        patch_embeds = tf.reshape(patch_embeds, (batch_size, self.num_patches, -1))

        class_embeds = tf.broadcast_to(self.class_embedding, (batch_size, 1, self.embed_dim))
        embeddings = tf.concat([class_embeds, patch_embeds], axis=1)
        embeddings = embeddings + self.position_embedding[:, : tf.shape(embeddings)[1], :]
        return embeddings
class_definition
10,956
13,242
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,133
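The patch and position counts computed in `__init__` above follow directly from the `BlipVisionConfig` defaults; a quick arithmetic check:

```python
# Defaults from BlipVisionConfig: 384x384 images, 16x16 patches, hidden size 768.
image_size, patch_size, hidden_size = 384, 16, 768

num_patches = (image_size // patch_size) ** 2  # 24 * 24 = 576 patch tokens
num_positions = num_patches + 1                # +1 for the class embedding
print(num_patches, num_positions)              # 576 577
# The layer therefore returns embeddings of shape (batch_size, 577, 768).
```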
class TFBlipTextEmbeddings(keras.layers.Layer):
    def __init__(self, config: BlipTextConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size

        self.config = config

    def build(self, input_shape: tf.TensorShape = None):
        with tf.name_scope("token_embedding"):
            self.weight = self.add_weight(
                shape=(self.config.vocab_size, self.embed_dim),
                initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
                trainable=True,
                name="weight",
            )

        with tf.name_scope("position_embedding"):
            self.position_embedding = self.add_weight(
                shape=(self.config.max_position_embeddings, self.embed_dim),
                initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
                trainable=True,
                name="embeddings",
            )

        super().build(input_shape)

    def call(
        self,
        input_ids: tf.Tensor = None,
        position_ids: tf.Tensor = None,
        inputs_embeds: tf.Tensor = None,
    ) -> tf.Tensor:
        """
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)

        position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
        position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
        final_embeddings = inputs_embeds + position_embeds

        return final_embeddings
class_definition
13,338
15,474
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,134
class TFBlipAttention(keras.layers.Layer): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = keras.layers.Dropout(config.attention_dropout, name="dropout") self.qkv = keras.layers.Dense( 3 * self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="qkv" ) self.projection = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="projection" ) def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = False, training: Optional[bool] = None, ) -> Tuple[tf.Tensor, tf.Tensor | None, Tuple[tf.Tensor] | None]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = shape_list(hidden_states) mixed_qkv = self.qkv(hidden_states) mixed_qkv = tf.reshape(mixed_qkv, (bsz, tgt_len, 3, self.num_heads, self.head_dim)) mixed_qkv = tf.transpose(mixed_qkv, perm=(2, 0, 3, 1, 4)) query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2] # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = query_states @ tf.transpose(key_states, (0, 1, 3, 2)) attention_scores = attention_scores * self.scale # Normalize the attention scores to probabilities. attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.transpose(attention_probs @ value_states, perm=(0, 2, 1, 3)) new_context_layer_shape = shape_list(context_layer)[:-2] + [self.embed_dim] context_layer = tf.reshape(context_layer, new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "qkv", None) is not None: with tf.name_scope(self.qkv.name): self.qkv.build([None, None, self.embed_dim]) if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, self.embed_dim])
class_definition
15,477
18,952
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,135
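A shape walkthrough of the fused QKV attention above, with illustrative sizes; this mirrors the reshape/transpose sequence rather than calling the layer itself.

```python
import tensorflow as tf

bsz, tgt_len, embed_dim, num_heads = 2, 5, 768, 12
head_dim = embed_dim // num_heads

mixed_qkv = tf.random.normal((bsz, tgt_len, 3 * embed_dim))              # stand-in for the qkv Dense output
mixed_qkv = tf.reshape(mixed_qkv, (bsz, tgt_len, 3, num_heads, head_dim))
mixed_qkv = tf.transpose(mixed_qkv, perm=(2, 0, 3, 1, 4))                # (3, batch, heads, seq, head_dim)
query, key, value = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]

scores = tf.matmul(query, key, transpose_b=True) * head_dim**-0.5        # (batch, heads, seq, seq)
probs = tf.nn.softmax(scores, axis=-1)
context = tf.transpose(tf.matmul(probs, value), perm=(0, 2, 1, 3))       # (batch, seq, heads, head_dim)
print(tf.reshape(context, (bsz, tgt_len, embed_dim)).shape)              # (2, 5, 768)
```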
class TFBlipMLP(keras.layers.Layer):
    def __init__(self, config: BlipConfig, **kwargs):
        super().__init__(**kwargs)

        self.activation_fn = get_tf_activation(config.hidden_act)

        in_proj_std = (config.hidden_size**-0.5) * ((2 * config.num_hidden_layers) ** -0.5)
        fc_std = (2 * config.hidden_size) ** -0.5

        self.fc1 = keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name="fc1"
        )
        self.fc2 = keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name="fc2"
        )
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.fc1(inputs=hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(inputs=hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "fc1", None) is not None:
            with tf.name_scope(self.fc1.name):
                self.fc1.build([None, None, self.config.hidden_size])
        if getattr(self, "fc2", None) is not None:
            with tf.name_scope(self.fc2.name):
                self.fc2.build([None, None, self.config.intermediate_size])
class_definition
18,955
20,322
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,136
class TFBlipEncoderLayer(keras.layers.Layer): def __init__(self, config: BlipConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.self_attn = TFBlipAttention(config, name="self_attn") self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") self.mlp = TFBlipMLP(config, name="mlp") self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, output_attentions: Optional[bool] = False, training: Optional[bool] = None, ) -> Tuple[tf.Tensor]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, self.embed_dim]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, self.embed_dim])
class_definition
20,325
23,081
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,137
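The encoder layer above uses pre-LayerNorm residual wiring: normalize, transform, then add the residual, for both the attention and feed-forward blocks. A structural sketch where the Dense sublayers are stand-ins (assumptions), not the real modules:

```python
import tensorflow as tf

embed_dim = 8
layer_norm1 = tf.keras.layers.LayerNormalization(epsilon=1e-5)
layer_norm2 = tf.keras.layers.LayerNormalization(epsilon=1e-5)
self_attn = tf.keras.layers.Dense(embed_dim)   # stands in for TFBlipAttention
mlp = tf.keras.layers.Dense(embed_dim)         # stands in for TFBlipMLP

hidden_states = tf.random.normal((2, 5, embed_dim))

residual = hidden_states
hidden_states = self_attn(layer_norm1(hidden_states)) + residual   # attention block

residual = hidden_states
hidden_states = mlp(layer_norm2(hidden_states)) + residual         # feed-forward block
```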
class TFBlipPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BlipConfig base_model_prefix = "blip" _keys_to_ignore_on_load_missing = [r"position_ids"]
class_definition
23,084
23,394
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,138
class TFBlipEncoder(keras.layers.Layer): config_class = BlipConfig """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`BlipEncoderLayer`]. Args: config (`BlipConfig`): The corresponding vision configuration for the `BlipEncoder`. """ def __init__(self, config: BlipConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layers = [TFBlipEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)] @unpack_inputs def call( self, inputs_embeds, attention_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[Tuple, TFBaseModelOutput]: r""" Args: inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Embedded representation of the inputs. Should be float, not int tokens. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None)
class_definition
27,169
30,950
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,139
class TFBlipVisionModel(TFBlipPreTrainedModel): main_input_name = "pixel_values" config_class = BlipVisionConfig def __init__(self, config: BlipVisionConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.config = config self.embeddings = TFBlipVisionEmbeddings(config, name="embeddings") self.encoder = TFBlipEncoder(config, name="encoder") self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm") self.embed_dim = config.hidden_size def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPooling( last_hidden_state=output.last_hidden_state, pooler_output=output.pooler_output, hidden_states=hs, attentions=attns, ) @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=BlipVisionConfig) def call( self, pixel_values: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[Tuple, TFBaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] # TF gets confused if we call the layer with inputs of different ranks, so insert a singleton dimension pooled_output = self.post_layernorm(tf.expand_dims(pooled_output, 1)) pooled_output = tf.squeeze(pooled_output, 1) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "post_layernorm", None) is not None: with tf.name_scope(self.post_layernorm.name): self.post_layernorm.build([None, None, self.embed_dim])
class_definition
30,953
34,812
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,140
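The pooling at the end of `call` takes the CLS token and runs it through `post_layernorm` a second time, temporarily expanding it back to rank 3 so Keras never sees the same layer called with inputs of different ranks. A standalone sketch with assumed sizes:

```python
import tensorflow as tf

post_layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-5)
last_hidden_state = post_layernorm(tf.random.normal((2, 197, 768)))   # assumed (batch, patches, hidden)

pooled_output = last_hidden_state[:, 0, :]                            # CLS token, (batch, hidden)
pooled_output = post_layernorm(tf.expand_dims(pooled_output, 1))      # rank 3 again for the layer call
pooled_output = tf.squeeze(pooled_output, 1)                          # back to (batch, hidden)
```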
class TFBlipMainLayer(keras.layers.Layer): config_class = BlipConfig def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(*args, **kwargs) if not isinstance(config.text_config, BlipTextConfig): raise TypeError( "config.text_config is expected to be of type BlipTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, BlipVisionConfig): raise TypeError( "config.vision_config is expected to be of type BlipVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = TFBlipTextModel(text_config, name="text_model") self.vision_model = TFBlipVisionModel(vision_config, name="vision_model") self.visual_projection = keras.layers.Dense( self.projection_dim, use_bias=False, kernel_initializer=get_initializer(config.initializer_range), name="visual_projection", ) self.text_projection = keras.layers.Dense( self.projection_dim, use_bias=False, kernel_initializer=get_initializer(config.initializer_range), name="text_projection", ) self.config = config def build(self, input_shape=None): self.logit_scale = self.add_weight( name="logit_scale", shape=[], initializer=keras.initializers.Constant(self.config.logit_scale_init_value), trainable=True, ) if self.built: return self.built = True if getattr(self, "text_model", None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) if getattr(self, "vision_model", None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, "visual_projection", None) is not None: with tf.name_scope(self.visual_projection.name): self.visual_projection.build([None, None, self.vision_embed_dim]) if getattr(self, "text_projection", None) is not None: with tf.name_scope(self.text_projection.name): self.text_projection.build([None, None, self.text_embed_dim]) @unpack_inputs def call( self, input_ids: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[Tuple, TFBlipOutput]: # Use BLIP model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / tf.norm(image_embeds, ord=2, axis=-1, keepdims=True) text_embeds = text_embeds / tf.norm(text_embeds, ord=2, axis=-1, keepdims=True) # cosine similarity as logits logit_scale = tf.exp(self.logit_scale) logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale logits_per_image = tf.transpose(logits_per_text) loss = None if return_loss: loss = blip_loss(logits_per_text) loss = tf.reshape(loss, (1,)) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return TFBlipOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
class_definition
34,815
40,356
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,141
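The main layer computes CLIP-style contrastive logits: both projected embeddings are L2-normalized and their pairwise dot products are scaled by the exponentiated learned `logit_scale`. A toy version where the batch size, projection dim and scale init are assumptions:

```python
import tensorflow as tf

image_embeds = tf.random.normal((4, 256))
text_embeds = tf.random.normal((4, 256))
logit_scale = tf.exp(tf.constant(2.6592))   # exp of a typical logit_scale_init_value (assumption)

image_embeds = image_embeds / tf.norm(image_embeds, ord=2, axis=-1, keepdims=True)
text_embeds = text_embeds / tf.norm(text_embeds, ord=2, axis=-1, keepdims=True)

logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
logits_per_image = tf.transpose(logits_per_text)
print(logits_per_image.shape)               # (4, 4)
```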
class TFBlipModel(TFBlipPreTrainedModel): config_class = BlipConfig _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"] main_input_name = "input_ids" def __init__(self, config: BlipConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.blip = TFBlipMainLayer(config, name="blip") def serving_output(self, output: TFBlipOutput) -> TFBlipOutput: return TFBlipOutput( logits_per_image=output.logits_per_image, logits_per_text=output.logits_per_text, text_embeds=output.text_embeds, image_embeds=output.image_embeds, ) @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBlipOutput, config_class=BlipConfig) def call( self, input_ids: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[Tuple, TFBlipOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFBlipModel >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities ```""" outputs = self.blip( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, return_loss=return_loss, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, return_dict: Optional[bool] = None, ) -> tf.Tensor: r""" Returns: text_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`TFBlipTextModel`]. 
Examples: ```python >>> from transformers import AutoProcessor, TFBlipModel >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") >>> text_features = model.get_text_features(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.blip.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=return_dict, ) pooled_output = text_outputs[1] text_features = self.blip.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: tf.Tensor | None = None, return_dict: Optional[bool] = None, ) -> tf.Tensor: r""" Returns: image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`TFBlipVisionModel`]. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFBlipModel >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="tf") >>> image_features = model.get_image_features(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.blip.vision_model(pixel_values=pixel_values, return_dict=return_dict) pooled_output = vision_outputs[1] # pooled_output image_features = self.blip.visual_projection(pooled_output) return image_features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "blip", None) is not None: with tf.name_scope(self.blip.name): self.blip.build(None)
class_definition
40,359
46,208
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,142
class TFBlipForConditionalGeneration(TFBlipPreTrainedModel): config_class = BlipConfig _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"] main_input_name = "pixel_values" def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model") self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name="text_decoder") self.decoder_input_ids = config.text_config.bos_token_id self.decoder_pad_token_id = config.text_config.pad_token_id def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings.patch_embedding @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBlipForConditionalGenerationModelOutput, config_class=BlipConfig) def call( self, pixel_values: tf.Tensor, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: tf.Tensor | None = None, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[Tuple, TFBlipForConditionalGenerationModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFBlipForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "A picture of" >>> inputs = processor(images=image, text=text, return_tensors="tf") >>> outputs = model(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) image_embeds = vision_outputs[0] outputs = self.text_decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, labels=labels, return_dict=False, training=training, ) if not return_dict: outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple(output for output in outputs if output is not None) if labels is not None: loss = outputs[0] logits = outputs[1] else: loss = None logits = outputs[0] if loss is not None and loss.shape.rank == 0: loss = tf.reshape(loss, (1,)) return TFBlipForConditionalGenerationModelOutput( loss=loss, logits=logits, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, ) def generate( self, pixel_values: tf.Tensor, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, **generate_kwargs, ) -> tf.Tensor: r""" Overrides *generate* function to be able to use the model as a conditional generator Parameters: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, image_height, image_width)`: Input image to be processed input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. 
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFBlipForConditionalGeneration >>> model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="tf") >>> outputs = model.generate(**inputs) >>> print(processor.decode(outputs[0], skip_special_tokens=True)) two cats sleeping on a couch ``` """ batch_size = pixel_values.shape[0] vision_outputs = self.vision_model(pixel_values=pixel_values) image_embeds = vision_outputs[0] image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32) if isinstance(input_ids, list): input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int32) elif input_ids is None: input_ids = tf.convert_to_tensor( [[self.decoder_input_ids, self.config.text_config.eos_token_id]], dtype=tf.int32 ) input_ids = tf.tile(input_ids, (batch_size, 1)) # PyTorch: input_ids[:, 0] = self.config.text_config.bos_token_id input_ids = tf.concat( [tf.ones((batch_size, 1), dtype=tf.int32) * self.config.text_config.bos_token_id, input_ids[:, 1:]], axis=1 ) attention_mask = attention_mask[:, :-1] if attention_mask is not None else None outputs = self.text_decoder.generate( input_ids=input_ids[:, :-1], eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, **generate_kwargs, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vision_model", None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, "text_decoder", None) is not None: with tf.name_scope(self.text_decoder.name): self.text_decoder.build(None)
class_definition
46,740
53,749
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,143
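When no prompt is supplied, `generate` builds a two-token prompt and then forces the first position to the BOS id with a concat, since TensorFlow tensors cannot be assigned in place the way the PyTorch version does. A standalone sketch with assumed token ids:

```python
import tensorflow as tf

bos_token_id, eos_token_id = 30522, 102     # assumed ids
batch_size = 2

input_ids = tf.convert_to_tensor([[bos_token_id, eos_token_id]], dtype=tf.int32)
input_ids = tf.tile(input_ids, (batch_size, 1))

# TF replacement for the PyTorch in-place write `input_ids[:, 0] = bos_token_id`
input_ids = tf.concat(
    [tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id, input_ids[:, 1:]], axis=1
)
print(input_ids.numpy())                    # [[30522 102] [30522 102]]
```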
class TFBlipForQuestionAnswering(TFBlipPreTrainedModel): config_class = BlipConfig _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"] def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model") self.text_encoder = TFBlipTextModel(config.text_config, name="text_encoder", add_pooling_layer=False) self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name="text_decoder") self.decoder_pad_token_id = config.text_config.pad_token_id self.decoder_start_token_id = config.text_config.bos_token_id def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings.patch_embedding # Adapted from transformers.models.t5.modeling_tf_t5.TFT5PreTrainedModel._shift_right def _shift_right(self, input_ids): decoder_start_token_id = self.decoder_start_token_id pad_token_id = self.decoder_pad_token_id if decoder_start_token_id is None or pad_token_id is None: raise ValueError("decoder_start_token_id and pad_token_id must be defined!") start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) start_tokens = tf.cast(start_tokens, input_ids.dtype) # Ensure compatible dtypes for concatenation shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.cast(tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids.dtype), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype)) return shifted_input_ids @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBlipTextVisionModelOutput, config_class=BlipVisionConfig) def call( self, input_ids: tf.Tensor, pixel_values: tf.Tensor | None = None, decoder_input_ids: tf.Tensor | None = None, decoder_attention_mask: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: tf.Tensor | None = None, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[Tuple, TFBlipTextVisionModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFBlipForQuestionAnswering >>> model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> # training >>> text = "How many cats are in the picture?" >>> label = "2" >>> inputs = processor(images=image, text=text, return_tensors="tf") >>> labels = processor(text=label, return_tensors="tf").input_ids >>> inputs["labels"] = labels >>> outputs = model(**inputs) >>> loss = outputs.loss >>> # inference >>> text = "How many cats are in the picture?" >>> inputs = processor(images=image, text=text, return_tensors="tf") >>> outputs = model.generate(**inputs) >>> print(processor.decode(outputs[0], skip_special_tokens=True)) 2 ```""" if labels is None and decoder_input_ids is None: raise ValueError( "Either `decoder_input_ids` or `labels` should be passed when calling" " `TFBlipForQuestionAnswering`. 
if you are training the model make sure that `labels` is passed, if you" " are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`" ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) image_embeds = vision_outputs[0] image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64) question_embeds = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=return_dict, training=training, ) question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state if labels is not None and decoder_input_ids is None: # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153 decoder_input_ids = labels answer_output = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=question_embeds, encoder_attention_mask=attention_mask, labels=labels, return_dict=return_dict, training=training, ) if labels is not None: decoder_loss = tf.reduce_mean(answer_output.loss) if return_dict else tf.reduce_mean(answer_output[0]) else: decoder_loss = None if not return_dict: outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple(output for output in outputs if output is not None) return TFBlipTextVisionModelOutput( loss=decoder_loss, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, ) def generate( self, input_ids: tf.Tensor, pixel_values: tf.Tensor, attention_mask: tf.Tensor | None = None, **generate_kwargs, ) -> tf.Tensor: r""" Overrides *generate* function to be able to use the model as a conditional generator Parameters: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, image_height, image_width)`: Input image to be processed attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for tokens that are NOT MASKED, `0` for MASKED tokens. generate_kwargs (dict, *optional*): Additional arguments passed to the `generate` function of the decoder Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFBlipForQuestionAnswering >>> model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "How many cats are in the picture?" 
>>> inputs = processor(images=image, text=text, return_tensors="tf") >>> outputs = model.generate(**inputs) >>> print(processor.decode(outputs[0], skip_special_tokens=True)) 2 ``` """ vision_outputs = self.vision_model(pixel_values=pixel_values) image_embeds = vision_outputs[0] image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32) if isinstance(input_ids, list): input_ids = tf.Tensor(input_ids) question_outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=False, ) question_embeds = question_outputs[0] question_attention_mask = tf.ones(shape_list(question_embeds)[:-1], dtype=tf.int32) bos_ids = tf.fill( (tf.shape(question_embeds)[0], 1), value=tf.cast(self.decoder_start_token_id, input_ids.dtype) ) outputs = self.text_decoder.generate( input_ids=bos_ids, eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, encoder_hidden_states=question_embeds, encoder_attention_mask=question_attention_mask, **generate_kwargs, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vision_model", None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, "text_encoder", None) is not None: with tf.name_scope(self.text_encoder.name): self.text_encoder.build(None) if getattr(self, "text_decoder", None) is not None: with tf.name_scope(self.text_decoder.name): self.text_decoder.build(None)
class_definition
54,154
64,270
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,144
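`_shift_right` prepends the decoder start token, drops the last label, and replaces any `-100` ignore indices with the pad token. A standalone sketch where the token ids (including the `-100` ignore index) are assumptions:

```python
import tensorflow as tf

decoder_start_token_id, pad_token_id = 30522, 0
labels = tf.constant([[7592, 2088, 102], [2023, -100, -100]], dtype=tf.int32)

start_tokens = tf.fill((labels.shape[0], 1), decoder_start_token_id)
shifted = tf.concat([tf.cast(start_tokens, labels.dtype), labels[:, :-1]], axis=-1)
shifted = tf.where(shifted == -100, tf.cast(pad_token_id, shifted.dtype), shifted)
print(shifted.numpy())                      # [[30522 7592 2088] [30522 2023 0]]
```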
class TFBlipForImageTextRetrieval(TFBlipPreTrainedModel): config_class = BlipConfig def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model") self.text_encoder = TFBlipTextModel(config.text_config, name="text_encoder", add_pooling_layer=False) # vision projection layer self.vision_proj = keras.layers.Dense( config.image_text_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="vision_proj", ) # text projection layer self.text_proj = keras.layers.Dense( config.image_text_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="text_proj", ) # image text matching head self.itm_head = keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="itm_head" ) self.decoder_pad_token_id = ( config.text_config.pad_token_id if not hasattr(config, "decoder_pad_token_id") else config.decoder_pad_token_id ) self.decoder_start_token_id = ( config.text_config.bos_token_id if not hasattr(config, "decoder_start_token_id") else config.decoder_start_token_id ) self.config = config def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings.patch_embedding @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBlipImageTextMatchingModelOutput, config_class=BlipVisionConfig) def call( self, input_ids: tf.Tensor, pixel_values: tf.Tensor | None = None, use_itm_head: Optional[bool] = True, attention_mask: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[Tuple, TFBlipImageTextMatchingModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFBlipForImageTextRetrieval >>> model = TFBlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "an image of a cat" >>> inputs = processor(images=image, text=text, return_tensors="tf") >>> outputs = model(**inputs) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) image_embeds = vision_outputs[0] image_atts = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64) # Matt: In PyTorch, only one path (itm/non-itm) is taken. However, in TensorFlow this can result in # some layers not being built! To avoid this, we always call both paths, then use an if statement to select # which output to pass to the final output. The unnecessary nodes will be pruned from the final graph, but # not before the layers have all been built correctly. 
itm_question_embeds = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=return_dict, training=training, ) itm_question_embeds = itm_question_embeds[0] if not return_dict else itm_question_embeds.last_hidden_state itm_output = self.itm_head(itm_question_embeds[:, 0, :]) no_itm_question_embeds = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict, training=training, ) no_itm_question_embeds = ( no_itm_question_embeds[0] if not return_dict else no_itm_question_embeds.last_hidden_state ) image_feat, _ = tf.linalg.normalize(self.vision_proj(image_embeds[:, 0, :]), ord=2, axis=-1) text_feat, _ = tf.linalg.normalize(self.text_proj(no_itm_question_embeds[:, 0, :]), ord=2, axis=-1) no_itm_output = tf.matmul(image_feat, text_feat, transpose_b=True) if use_itm_head: output = itm_output question_embeds = itm_question_embeds else: output = no_itm_output question_embeds = no_itm_question_embeds if not return_dict: outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,) return tuple(output for output in outputs if output is not None) return TFBlipImageTextMatchingModelOutput( itm_score=output, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, question_embeds=question_embeds, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vision_model", None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, "text_encoder", None) is not None: with tf.name_scope(self.text_encoder.name): self.text_encoder.build(None) if getattr(self, "vision_proj", None) is not None: with tf.name_scope(self.vision_proj.name): self.vision_proj.build([None, None, self.config.vision_config.hidden_size]) if getattr(self, "text_proj", None) is not None: with tf.name_scope(self.text_proj.name): self.text_proj.build([None, None, self.config.text_config.hidden_size]) if getattr(self, "itm_head", None) is not None: with tf.name_scope(self.itm_head.name): self.itm_head.build([None, None, self.config.text_config.hidden_size])
class_definition
64,592
71,311
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_tf_blip.py
null
6,145
class BlipTextEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.config = config def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class_definition
1,422
3,488
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,146
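When cached key/values are present, the embeddings only need position ids for the newly added tokens, so the registered `position_ids` buffer is sliced starting at `past_key_values_length`. A small sketch with assumed lengths:

```python
import torch

max_position_embeddings = 512
position_ids = torch.arange(max_position_embeddings).expand((1, -1))   # same buffer layout as above

past_key_values_length, seq_length = 6, 3   # 6 cached tokens, 3 new tokens (assumptions)
sliced = position_ids[:, past_key_values_length : seq_length + past_key_values_length]
print(sliced)                               # tensor([[6, 7, 8]])
```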
class BlipTextSelfAttention(nn.Module): def __init__(self, config, is_cross_attention): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BlipTextModel forward() function) attention_scores = attention_scores + attention_mask.to(attention_scores.device) # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs_dropped = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs
class_definition
3,569
10,159
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,147
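In the `relative_key` mode above, the pairwise distance between positions (offset so the indices are non-negative) is looked up in a learned embedding and contracted with the query to produce an additive score per head. A standalone sketch with assumed sizes:

```python
import torch

seq_length, num_heads, head_dim, max_position_embeddings = 4, 2, 8, 16
query_layer = torch.randn(1, num_heads, seq_length, head_dim)
distance_embedding = torch.nn.Embedding(2 * max_position_embeddings - 1, head_dim)

position_ids_l = torch.arange(seq_length).view(-1, 1)
position_ids_r = torch.arange(seq_length).view(1, -1)
distance = position_ids_l - position_ids_r            # (seq, seq), values in [-(seq-1), seq-1]

positional_embedding = distance_embedding(distance + max_position_embeddings - 1)
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
print(relative_position_scores.shape)                 # torch.Size([1, 2, 4, 4])
```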
class BlipTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class_definition
10,252
10,862
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,148
class BlipTextAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.self = BlipTextSelfAttention(config, is_cross_attention) self.output = BlipTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs
class_definition
10,943
12,983
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,149
class BlipTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class_definition
13,078
13,647
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,150
class BlipTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class_definition
13,736
14,348
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,151
class BlipTextLayer(nn.Module): def __init__(self, config, layer_num): super().__init__() self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BlipTextAttention(config) self.layer_num = layer_num if self.config.is_decoder: self.crossattention = BlipTextAttention(config, is_cross_attention=self.config.is_decoder) self.intermediate = BlipTextIntermediate(config) self.output = BlipTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class_definition
14,351
16,983
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,152
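The feed-forward block above is run through `apply_chunking_to_forward`, which splits the sequence dimension into chunks of `chunk_size_feed_forward`, applies the forward function to each chunk, and concatenates the results, trading compute for peak memory. A toy illustration where the stand-in feed-forward and sizes are assumptions:

```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

def feed_forward_chunk(attention_output):
    return attention_output * 2.0            # stands in for intermediate -> output

attention_output = torch.randn(2, 6, 8)      # (batch, seq, hidden)
layer_output = apply_chunking_to_forward(feed_forward_chunk, 3, 1, attention_output)
print(torch.allclose(layer_output, attention_output * 2.0))   # True
```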
class BlipTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BlipTextLayer(config, i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.is_decoder else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, )
class_definition
17,065
20,838
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,153
class BlipTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
class_definition
20,925
21,488
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,154
class BlipTextPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class_definition
21,592
22,296
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,155
class BlipTextLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BlipTextPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
class_definition
22,393
23,233
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,156
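The line `self.decoder.bias = self.bias` makes the standalone bias parameter and the decoder's bias point at the same tensor, which is what lets `resize_token_embeddings` keep the two consistent. A small sketch of the trick outside the class, with toy sizes (illustrative):

```python
# Sketch of the bias-tying trick: the output projection is created without a bias,
# and a separate Parameter is attached as its bias so both names share one tensor.
import torch
from torch import nn

vocab_size, hidden_size = 10, 4
decoder = nn.Linear(hidden_size, vocab_size, bias=False)
bias = nn.Parameter(torch.zeros(vocab_size))
decoder.bias = bias

print(decoder.bias is bias)  # True -> resizing or reloading one also updates the other
```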
class BlipTextOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BlipTextLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores
class_definition
23,325
23,647
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,157
class BlipTextPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BlipTextConfig base_model_prefix = "bert" _no_split_modules = [] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
class_definition
23,729
24,626
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,158
class BlipTextModel(BlipTextPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. argument and `is_decoder` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BlipTextEmbeddings(config) self.encoder = BlipTextEncoder(config) self.pooler = BlipTextPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask( self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool ) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`Tuple[int]`): The shape of the input to the model. device (`torch.device`): The device of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # in case past_key_values are used we need to add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones( (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype ), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, is_decoder: Optional[bool] = False, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() batch_size, seq_length = input_shape device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape device = inputs_embeds.device elif encoder_embeds is not None: input_shape = encoder_embeds.size()[:-1] batch_size, seq_length = input_shape device = encoder_embeds.device else: raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length))).to(device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, input_shape, device, is_decoder ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if encoder_embeds is None: embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) else: embedding_output = encoder_embeds encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, )
class_definition
24,744
36,998
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,159
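The causal part of `get_extended_attention_mask` is built from position ids and then folded into an additive mask of large negative values. A standalone sketch of that construction, with an illustrative padding mask:

```python
# Standalone sketch of the causal-mask construction in get_extended_attention_mask:
# a lower-triangular boolean mask built from position ids, combined with a padding
# mask, and converted into an additive mask of large negative values.
import torch

batch_size, seq_length = 1, 4
attention_mask = torch.tensor([[1, 1, 1, 0]])          # last position is padding

seq_ids = torch.arange(seq_length)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
causal_mask = causal_mask.float()                      # (batch, seq, seq), lower triangular

extended = causal_mask[:, None, :, :] * attention_mask[:, None, None, :].float()
extended = (1.0 - extended) * -10000.0                 # 0.0 where attending, -10000.0 where masked
print(extended[0, 0])
```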
class BlipTextLMHeadModel(BlipTextPreTrainedModel, GenerationMixin): def __init__(self, config): super().__init__(config) self.bert = BlipTextModel(config, add_pooling_layer=False) self.cls = BlipTextOnlyMLMHead(config) self.label_smoothing = config.label_smoothing def get_input_embeddings(self): return self.bert.get_input_embeddings() def set_input_embeddings(self, new_embeddings): self.bert.set_input_embeddings(new_embeddings) def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, return_logits: Optional[bool] = False, is_decoder: Optional[bool] = True, reduction: Optional[str] = "mean", ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores[:, :-1, :].contiguous() lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous().to(shifted_prediction_scores.device) loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=self.label_smoothing) lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if reduction == "none": lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): # Overwrite -- hardcoded key return (`is_decoder=True`) input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values, "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), "is_decoder": True, } def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
class_definition
37,080
44,174
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip_text.py
null
6,160
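The loss in `forward` is plain next-token prediction: prediction scores are shifted left, labels right, and `-100` positions are ignored by `CrossEntropyLoss`. A self-contained sketch with illustrative shapes:

```python
# Sketch of the next-token loss used in BlipTextLMHeadModel.forward: the score at
# position t is compared against the label at position t+1; -100 labels are ignored.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
prediction_scores = torch.randn(2, 6, vocab_size)      # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (2, 6))
labels[:, -1] = -100                                   # e.g. a padded position is ignored

shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()

# label_smoothing mirrors self.label_smoothing (0.0 here, illustrative)
loss_fct = CrossEntropyLoss(reduction="mean", label_smoothing=0.0)
loss = loss_fct(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(loss.item())
```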
class BlipForConditionalGenerationModelOutput(ModelOutput):
    """
    Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
    last hidden states. This class also adds the loss term from the text decoder.

    Args:
        loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Language modeling loss from the text decoder.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
            Prediction scores of the language modeling head of the text decoder model.
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*):
            The image embeddings obtained after applying the Vision Transformer model to the input image.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[Tuple[torch.FloatTensor]] = None
    logits: Optional[Tuple[torch.FloatTensor]] = None
    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

    @property
    def decoder_logits(self):
        warnings.warn(
            "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
            " Please use the `logits` attribute to retrieve the final output instead.",
            FutureWarning,
        )
        return self.logits
class_definition
2,009
4,562
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,161
class BlipTextVisionModelOutput(ModelOutput):
    """
    Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
    last hidden states. This class also adds the loss term from the text decoder.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss from the text decoder.
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
class_definition
4,576
6,645
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,162
class BlipImageTextMatchingModelOutput(ModelOutput):
    """
    Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
    last hidden states. This class also adds the loss term from the text decoder as well as the image-text similarity
    scores.

    Args:
        itm_score (`torch.FloatTensor`):
            The image-text similarity scores.
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss from the text decoder.
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        vision_pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*):
            Last layer hidden-state of the vision-only branch of the model.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        question_embeds (`torch.FloatTensor`):
            The question embeddings obtained by the text projection layer.
    """

    itm_score: Optional[torch.FloatTensor] = None
    loss: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    vision_pooler_output: Optional[torch.FloatTensor] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    question_embeds: Optional[Tuple[torch.FloatTensor]] = None
class_definition
6,659
9,357
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,163
class BlipOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`]. image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`]. text_model_output(`BaseModelOutputWithPooling`): The output of the [`BlipTextModel`]. vision_model_output(`BaseModelOutputWithPooling`): The output of the [`BlipVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() )
class_definition
9,371
11,229
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,164
class BlipVisionEmbeddings(nn.Module): def __init__(self, config: BlipVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding class_pos_embed = self.position_embedding[:, :1] patch_pos_embed = self.position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: position_embedding = self.interpolate_pos_encoding(embeddings, height, width) else: position_embedding = self.position_embedding embeddings = embeddings + position_embedding[:, : embeddings.size(1), :].to(target_dtype) return embeddings
class_definition
11,232
14,642
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,165
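`interpolate_pos_encoding` resizes the patch position table by treating it as a 2D grid, interpolating it bicubically, and flattening it back, leaving the class-token embedding untouched. A plain-torch sketch of the same steps with illustrative grid sizes:

```python
# Plain-torch sketch of interpolate_pos_encoding: reshape the patch position table
# to a grid, resize it with bicubic interpolation, flatten it back, and re-attach
# the class-token embedding. Sizes are illustrative.
import torch
from torch import nn

dim, old_grid, new_grid = 16, 14, 20
pos_embed = torch.randn(1, old_grid * old_grid + 1, dim)
class_pos, patch_pos = pos_embed[:, :1], pos_embed[:, 1:]

patch_pos = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(
    patch_pos, size=(new_grid, new_grid), mode="bicubic", align_corners=False
)
patch_pos = patch_pos.permute(0, 2, 3, 1).view(1, -1, dim)

new_pos_embed = torch.cat((class_pos, patch_pos), dim=1)
print(new_pos_embed.shape)  # torch.Size([1, 401, 16])
```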
class BlipTextEmbeddings(nn.Module): def __init__(self, config: BlipTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] max_position_embedding = self.position_embedding.weight.shape[0] if seq_length > max_position_embedding: raise ValueError( f"Sequence length must be less than max_position_embeddings (got `sequence length`: " f"{seq_length} and max_position_embeddings: {max_position_embedding}" ) if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings
class_definition
14,733
16,311
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,166
class BlipAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = nn.Dropout(config.attention_dropout) self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim) self.projection = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() mixed_qkv = ( self.qkv(hidden_states) .reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads) .permute(2, 0, 3, 1, 4) ) query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2] # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) attention_scores = attention_scores * self.scale # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) context_layer = context_layer.reshape(new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs
class_definition
16,314
19,072
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,167
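`BlipAttention` computes Q, K and V with a single fused `qkv` projection and splits them per head before the scaled dot product. A plain-torch sketch of that split, with illustrative sizes:

```python
# Plain-torch sketch of the fused-QKV attention in BlipAttention: one linear layer
# produces Q, K and V, which are split, reshaped per head, and combined with a
# scaled dot product. Sizes are illustrative.
import torch
from torch import nn

bsz, tgt_len, embed_dim, num_heads = 2, 5, 16, 4
head_dim = embed_dim // num_heads
scale = head_dim ** -0.5

qkv = nn.Linear(embed_dim, 3 * embed_dim)
hidden_states = torch.randn(bsz, tgt_len, embed_dim)

mixed = qkv(hidden_states).reshape(bsz, tgt_len, 3, num_heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = mixed[0], mixed[1], mixed[2]            # each (bsz, num_heads, tgt_len, head_dim)

attn = torch.softmax(torch.matmul(q, k.transpose(-1, -2)) * scale, dim=-1)
context = torch.matmul(attn, v).permute(0, 2, 1, 3).reshape(bsz, tgt_len, embed_dim)
print(context.shape)  # torch.Size([2, 5, 16])
```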
class BlipMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states
class_definition
19,152
19,722
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,168
class BlipEncoderLayer(nn.Module): def __init__(self, config: BlipConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = BlipAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = BlipMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class_definition
19,725
21,563
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,169
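The layer follows a pre-LayerNorm residual pattern: each sub-block normalizes its input first, then adds its output back to the residual stream. A sketch of that pattern using simple stand-in modules (a linear layer in place of attention, a small MLP in place of `BlipMLP`):

```python
# Sketch of the pre-LayerNorm residual pattern in BlipEncoderLayer, with stand-in
# sub-modules. The stand-ins are illustrative, not the real attention/MLP blocks.
import torch
from torch import nn

dim = 16
ln1, ln2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn_like = nn.Linear(dim, dim)                                   # stand-in for self-attention
mlp_like = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim))

x = torch.randn(2, 5, dim)
x = x + attn_like(ln1(x))   # residual around the normalized attention block
x = x + mlp_like(ln2(x))    # residual around the normalized MLP block
print(x.shape)  # torch.Size([2, 5, 16])
```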
class BlipPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BlipConfig base_model_prefix = "blip" supports_gradient_checkpointing = True _no_split_modules = ["BlipEncoderLayer", "BlipTextEmbeddings"] _skip_keys_device_placement = ["past_key_value"] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_range if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor) if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_() if isinstance(module, BlipVisionEmbeddings): if hasattr(self.config, "vision_config"): factor = self.config.vision_config.initializer_range nn.init.trunc_normal_( module.position_embedding, mean=0.0, std=factor, ) nn.init.trunc_normal_( module.class_embedding, mean=0.0, std=factor, ) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
class_definition
21,566
23,085
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,170
class BlipEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`BlipEncoderLayer`]. Args: config (`BlipConfig`): The corresponding vision configuration for the `BlipEncoder`. """ def __init__(self, config: BlipConfig): super().__init__() self.config = config self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embedded representation of the inputs. Should be float, not int tokens. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
class_definition
28,822
32,553
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,171
class BlipVisionModel(BlipPreTrainedModel): main_input_name = "pixel_values" config_class = BlipVisionConfig def __init__(self, config: BlipVisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = BlipVisionEmbeddings(config) self.encoder = BlipEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=BlipVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.embeddings
class_definition
32,556
35,031
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,172
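A minimal sketch of running `BlipVisionModel` on random pixel values, instantiated from a deliberately tiny `BlipVisionConfig` (the values are illustrative, not those of a released checkpoint):

```python
# Minimal sketch: BlipVisionModel from a tiny, illustrative config on random input.
import torch
from transformers import BlipVisionConfig
from transformers.models.blip.modeling_blip import BlipVisionModel

config = BlipVisionConfig(
    hidden_size=32,          # illustrative toy sizes
    intermediate_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    image_size=96,
    patch_size=16,
)
model = BlipVisionModel(config)

pixel_values = torch.randn(1, 3, 96, 96)
outputs = model(pixel_values=pixel_values)
# (96 / 16) ** 2 patches + 1 class token = 37 positions
print(outputs.last_hidden_state.shape)  # torch.Size([1, 37, 32])
```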
class BlipModel(BlipPreTrainedModel): config_class = BlipConfig def __init__(self, config: BlipConfig): super().__init__(config) if not isinstance(config.text_config, BlipTextConfig): raise TypeError( "config.text_config is expected to be of type BlipTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, BlipVisionConfig): raise TypeError( "config.vision_config is expected to be of type BlipVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = BlipTextModel(text_config) self.vision_model = BlipVisionModel(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) logger.warning( "`BlipModel` is going to be deprecated in future release, please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase." ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.text_model.set_input_embeddings(value) @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`]. Examples: ```python >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=return_dict, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING) def get_multimodal_features( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" Returns: multimodal_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The multimodal embeddings obtained by applying the image embeddings to the text encoder using the cross-attention mechanism. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["a photo of a cat", "a photo of a dog"] >>> inputs = processor(images=image, text=texts, padding=True, return_tensors="pt") >>> multimodal_features = model.get_multimodal_features(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=True, output_hidden_states=True, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=return_dict, ) pooled_output = text_outputs[1] # pooled_output multimodal_features = self.text_projection(pooled_output) return multimodal_features @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BlipOutput, config_class=BlipConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Union[Tuple, BlipOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipModel >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base") 
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use BLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp().to(device=text_embeds.device) image_embeds = image_embeds.to(device=text_embeds.device, dtype=text_embeds.dtype) logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = blip_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return BlipOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
class_definition
35,291
46,505
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,173
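Building on the docstring examples above (same checkpoint), the projected features from `get_image_features` and `get_text_features` can be normalized and compared directly, mirroring what `forward` does before applying `logit_scale`. A sketch:

```python
# Sketch: image-text cosine similarity from the two feature helpers of BlipModel,
# using the checkpoint already shown in the docstrings above.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, BlipModel

model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["a photo of a cat", "a photo of a dog"]

with torch.no_grad():
    image_features = model.get_image_features(**processor(images=image, return_tensors="pt"))
    text_features = model.get_text_features(**processor(text=texts, padding=True, return_tensors="pt"))

# L2-normalize and compare, as forward() does before scaling by logit_scale
image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
similarity = image_features @ text_features.t()   # shape (1, 2)
print(similarity)
```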
class BlipForConditionalGeneration(BlipPreTrainedModel, GenerationMixin): config_class = BlipConfig _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"] main_input_name = "pixel_values" def __init__(self, config: BlipConfig): super().__init__(config) self.vision_model = BlipVisionModel(config.vision_config) self.text_decoder = BlipTextLMHeadModel(config.text_config) self.decoder_input_ids = config.text_config.bos_token_id self.decoder_pad_token_id = config.text_config.pad_token_id # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.text_decoder.get_input_embeddings() def set_input_embeddings(self, value): self.text_decoder.set_input_embeddings(value) @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BlipForConditionalGenerationModelOutput, config_class=BlipVisionConfig) def forward( self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Union[Tuple, BlipForConditionalGenerationModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "A picture of" >>> inputs = processor(images=image, text=text, return_tensors="pt") >>> outputs = model(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] outputs = self.text_decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, labels=labels, return_dict=return_dict, reduction="mean", ) if not return_dict: outputs = (outputs[0], outputs[1]) if labels is not None else (outputs[0],) outputs += (image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple(output for output in outputs if output is not None) return BlipForConditionalGenerationModelOutput( loss=outputs.loss, logits=outputs.logits, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, ) @torch.no_grad() def generate( self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, interpolate_pos_encoding: bool = False, **generate_kwargs, ) -> torch.LongTensor: r""" Overrides *generate* function to be able to use the model as a conditional generator Parameters: pixel_values (*torch.FloatTensor* of shape *(batch_size, 
num_channels, image_height, image_width)*: Input image to be processed input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*): The sequence used as a prompt for the generation. attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipForConditionalGeneration >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model.generate(**inputs) >>> print(processor.decode(outputs[0], skip_special_tokens=True)) two cats sleeping on a couch ``` """ batch_size = pixel_values.shape[0] vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device) if isinstance(input_ids, list): input_ids = torch.LongTensor(input_ids) elif input_ids is None: input_ids = ( torch.LongTensor([[self.decoder_input_ids, self.config.text_config.eos_token_id]]) .repeat(batch_size, 1) .to(image_embeds.device) ) input_ids[:, 0] = self.config.text_config.bos_token_id attention_mask = attention_mask[:, :-1] if attention_mask is not None else None outputs = self.text_decoder.generate( input_ids=input_ids[:, :-1], eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, **generate_kwargs, ) return outputs
class_definition
47,037
53,875
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,174
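A hedged usage sketch for the `BlipForConditionalGeneration` record above (not part of the source file): it restates the docstring examples to contrast unconditional and prompted captioning. The checkpoint id is taken from the docstring; `max_new_tokens=20` is an illustrative assumption.

```python
# Minimal sketch: unconditional vs. conditional captioning with the class above.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, BlipForConditionalGeneration

processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Unconditional captioning: only pixel_values are passed, so `generate` builds a
# BOS-only prompt internally (see the `input_ids is None` branch in the record).
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    caption_ids = model.generate(**inputs, max_new_tokens=20)
print(processor.decode(caption_ids[0], skip_special_tokens=True))

# Conditional captioning: a text prefix is encoded and continued by the decoder.
inputs = processor(images=image, text="a photography of", return_tensors="pt")
with torch.no_grad():
    caption_ids = model.generate(**inputs, max_new_tokens=20)
print(processor.decode(caption_ids[0], skip_special_tokens=True))
```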
class BlipForQuestionAnswering(BlipPreTrainedModel): config_class = BlipConfig _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"] def __init__(self, config: BlipConfig): super().__init__(config) self.vision_model = BlipVisionModel(config.vision_config) self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False) self.text_decoder = BlipTextLMHeadModel(config.text_config) self.decoder_pad_token_id = config.text_config.pad_token_id self.decoder_start_token_id = config.text_config.bos_token_id # Initialize weights and apply final processing self.post_init() def set_input_embeddings(self, value): self.text_encoder.set_input_embeddings(value) def get_input_embeddings(self): # This will return shared embeddings if they are shared else specific to encoder. return self.text_encoder.get_input_embeddings() @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig) def forward( self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Union[Tuple, BlipTextVisionModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipForQuestionAnswering >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> # training >>> text = "How many cats are in the picture?" >>> label = "2" >>> inputs = processor(images=image, text=text, return_tensors="pt") >>> labels = processor(text=label, return_tensors="pt").input_ids >>> inputs["labels"] = labels >>> outputs = model(**inputs) >>> loss = outputs.loss >>> loss.backward() >>> # inference >>> text = "How many cats are in the picture?" >>> inputs = processor(images=image, text=text, return_tensors="pt") >>> outputs = model.generate(**inputs) >>> print(processor.decode(outputs[0], skip_special_tokens=True)) 2 ```""" if labels is None and decoder_input_ids is None: raise ValueError( "Either `decoder_input_ids` or `labels` should be passed when calling `forward` with" " `BlipForQuestionAnswering`. 
if you are training the model make sure that `labels` is passed, if you" " are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`" ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long) question_embeds = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=return_dict, ) if labels is not None and decoder_input_ids is None: # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153 decoder_input_ids = labels question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state answer_output = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=question_embeds, encoder_attention_mask=attention_mask, labels=labels, return_dict=return_dict, reduction="mean", ) if labels is not None: decoder_loss = answer_output.loss.mean() if return_dict else answer_output[0].mean() else: decoder_loss = None if not return_dict: outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple(output for output in outputs if output is not None) return BlipTextVisionModelOutput( loss=decoder_loss, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, ) @torch.no_grad() def generate( self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.LongTensor] = None, interpolate_pos_encoding: bool = False, **generate_kwargs, ) -> torch.LongTensor: r""" Overrides *generate* function to be able to use the model as a conditional generator Parameters: input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*): The sequence used as a prompt for the generation. pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*: Input image to be processed attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for tokens that are NOT MASKED, `0` for MASKED tokens. **generate_kwargs: Additional arguments passed to the *generate* function of the decoder Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipForQuestionAnswering >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "How many cats are in the picture?" 
>>> inputs = processor(images=image, text=text, return_tensors="pt") >>> outputs = model.generate(**inputs) >>> print(processor.decode(outputs[0], skip_special_tokens=True)) 2 ``` """ vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device) if isinstance(input_ids, list): input_ids = torch.LongTensor(input_ids) question_outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=False, ) question_embeds = question_outputs[0] question_attention_mask = torch.ones(question_embeds.size()[:-1], dtype=torch.long).to(question_embeds.device) bos_ids = torch.full( (question_embeds.size(0), 1), fill_value=self.decoder_start_token_id, device=question_embeds.device ) outputs = self.text_decoder.generate( input_ids=bos_ids, eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, encoder_hidden_states=question_embeds, encoder_attention_mask=question_attention_mask, **generate_kwargs, ) return outputs
class_definition
54,280
63,323
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,175
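A hedged sketch of the two code paths documented in the `BlipForQuestionAnswering` record above: a training step where `labels` double as `decoder_input_ids`, and an inference step where `generate` seeds the decoder with the BOS token. Checkpoint id and question text are taken from the docstring example.

```python
# Minimal sketch: training vs. inference with BlipForQuestionAnswering.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, BlipForQuestionAnswering

processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
question = "How many cats are in the picture?"

# Training step: `labels` are reused as `decoder_input_ids` (they are already
# shifted right, see the comment in the forward pass above) and a loss is returned.
inputs = processor(images=image, text=question, return_tensors="pt")
inputs["labels"] = processor(text="2", return_tensors="pt").input_ids
loss = model(**inputs).loss
loss.backward()
model.zero_grad()

# Inference step: no labels, the answer is decoded with `generate`.
inputs = processor(images=image, text=question, return_tensors="pt")
with torch.no_grad():
    answer_ids = model.generate(**inputs)
print(processor.decode(answer_ids[0], skip_special_tokens=True))
```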
class BlipForImageTextRetrieval(BlipPreTrainedModel): config_class = BlipConfig def __init__(self, config: BlipConfig): super().__init__(config) self.vision_model = BlipVisionModel(config.vision_config) self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False) # vision projection layer self.vision_proj = nn.Linear(config.vision_config.hidden_size, config.image_text_hidden_size) # text projection layer self.text_proj = nn.Linear(config.text_config.hidden_size, config.image_text_hidden_size) # image text matching head self.itm_head = nn.Linear(config.text_config.hidden_size, 2) self.decoder_pad_token_id = ( config.text_config.pad_token_id if not hasattr(config, "decoder_pad_token_id") else config.decoder_pad_token_id ) self.decoder_start_token_id = ( config.text_config.bos_token_id if not hasattr(config, "decoder_start_token_id") else config.decoder_start_token_id ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.text_encoder.get_input_embeddings() def set_input_embeddings(self, value): self.text_encoder.set_input_embeddings(value) @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig) def forward( self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, use_itm_head: Optional[bool] = True, attention_mask: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Union[Tuple, BlipTextVisionModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipForImageTextRetrieval >>> model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "an image of a cat" >>> inputs = processor(images=image, text=text, return_tensors="pt") >>> outputs = model(**inputs) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long) if use_itm_head: question_embeds = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=return_dict, ) question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state output = self.itm_head(question_embeds[:, 0, :]) else: question_embeds = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict, ) question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1) text_feat = 
normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1) output = image_feat @ text_feat.t() if not return_dict: outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,) return tuple(output for output in outputs if output is not None) return BlipImageTextMatchingModelOutput( itm_score=output, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, question_embeds=question_embeds, )
class_definition
63,645
68,642
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/blip/modeling_blip.py
null
6,176
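A hedged sketch of the `use_itm_head` switch in the `BlipForImageTextRetrieval` forward pass above: the cross-attention branch returns 2-way match/no-match logits from `itm_head`, while the projection branch returns a similarity matrix of normalized image and text features. Both arrive in the `itm_score` field of the returned output; the checkpoint id and text come from the docstring example.

```python
# Minimal sketch: ITM classification score vs. projection-based similarity.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, BlipForImageTextRetrieval

processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="an image of a cat", return_tensors="pt")

with torch.no_grad():
    # Cross-attention branch: itm_score has shape (batch_size, 2); index 1 is the "match" class.
    itm_logits = model(**inputs, use_itm_head=True).itm_score
    match_prob = torch.softmax(itm_logits, dim=-1)[:, 1]

    # Projection branch: an (image batch, text batch) matrix of cosine similarities.
    similarity = model(**inputs, use_itm_head=False).itm_score

print(float(match_prob[0]), float(similarity[0, 0]))
```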
class RetrievAugLMMarginOutput(ModelOutput): """ Base class for retriever augmented marginalized models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. 
generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None doc_scores: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None retrieved_doc_embeds: Optional[torch.FloatTensor] = None retrieved_doc_ids: Optional[torch.LongTensor] = None context_input_ids: Optional[torch.LongTensor] = None context_attention_mask: Optional[torch.LongTensor] = None question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
class_definition
1,305
8,630
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_rag.py
null
6,177
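A model-free sketch that makes the batch/document dimensions documented in `RetrievAugLMMarginOutput` concrete by building the dataclass from dummy tensors. All sizes are illustrative assumptions, not values from the record; only the relationship between `logits` (one row per question-document pair) and `doc_scores` is meant to be taken literally.

```python
# Minimal sketch: field shapes of the marginalized RAG output, with dummy tensors.
import torch
from transformers.models.rag.modeling_rag import RetrievAugLMMarginOutput

batch_size, n_docs, combined_len, tgt_len, vocab_size = 2, 5, 300, 10, 32000

output = RetrievAugLMMarginOutput(
    loss=torch.tensor(3.14),
    # logits are produced per (question, document) pair, hence batch_size * n_docs rows
    logits=torch.randn(batch_size * n_docs, tgt_len, vocab_size),
    doc_scores=torch.randn(batch_size, n_docs),
    context_input_ids=torch.zeros(batch_size * n_docs, combined_len, dtype=torch.long),
)

# ModelOutput subclasses allow both attribute and key access to the set fields.
assert output.doc_scores.shape == (batch_size, n_docs)
assert output["logits"].shape[0] == batch_size * n_docs
```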
class RetrievAugLMOutput(ModelOutput): """ Args: logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ logits: torch.FloatTensor = None doc_scores: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None retrieved_doc_embeds: Optional[torch.FloatTensor] = None retrieved_doc_ids: Optional[torch.LongTensor] = None context_input_ids: Optional[torch.LongTensor] = None context_attention_mask: Optional[torch.LongTensor] = None question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
class_definition
8,644
15,713
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_rag.py
null
6,178
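A small hedged sketch comparing the two RAG output dataclasses above: `RetrievAugLMOutput` is the loss-free counterpart of `RetrievAugLMMarginOutput`, otherwise carrying the same retrieval and generator fields. The import path follows the filepath shown in the record.

```python
# Minimal sketch: the only field difference between the two output classes is `loss`.
from dataclasses import fields

from transformers.models.rag.modeling_rag import (
    RetrievAugLMMarginOutput,
    RetrievAugLMOutput,
)

margin_fields = {f.name for f in fields(RetrievAugLMMarginOutput)}
plain_fields = {f.name for f in fields(RetrievAugLMOutput)}

print(margin_fields - plain_fields)  # expected: {'loss'}
```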
class RagPreTrainedModel(PreTrainedModel): r""" RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al. RAG is a retriever augmented model and encapsulate three components: a question encoder, a dataset retriever and a generator, the encoder and generator are trainable while the retriever is just an indexed dataset. """ config_class = RagConfig base_model_prefix = "rag" _supports_flash_attn_2 = True _supports_sdpa = True @classmethod def from_pretrained(cls, *args, **kwargs): # At the moment fast initialization is not supported # for composite models kwargs["_fast_init"] = False return super().from_pretrained(*args, **kwargs) @classmethod def from_pretrained_question_encoder_generator( cls, question_encoder_pretrained_model_name_or_path: str = None, generator_pretrained_model_name_or_path: str = None, retriever: RagRetriever = None, **kwargs, ) -> PreTrainedModel: r""" Instantiates an question encoder and a generator from one or two base classes of the library from pretrained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the question encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the generator. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. retriever ([`RagRetriever`], *optional*): The retriever to use. kwwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the question_encoder configuration, use the prefix *question_encoder_* for each configuration parameter. - To update the generator configuration, use the prefix *generator_* for each configuration parameter. 
- To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import RagModel >>> # initialize a RAG from two pretrained models. >>> model = RagModel.from_pretrained_question_encoder_generator( ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small" ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./rag") >>> # load fine-tuned model >>> model = RagModel.from_pretrained("./rag") ```""" kwargs_question_encoder = { argument[len("question_encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("question_encoder_") } kwargs_generator = { argument[len("generator_") :]: value for argument, value in kwargs.items() if argument.startswith("generator_") } # remove question_encoder, generator kwargs from kwargs for key in kwargs_question_encoder.keys(): del kwargs["question_encoder_" + key] for key in kwargs_generator.keys(): del kwargs["generator_" + key] # Load and initialize the question_encoder and generator # The distinction between question_encoder and generator at the model level is made # by the value of the flag `is_generator` that we need to set correctly. question_encoder = kwargs_question_encoder.pop("model", None) if question_encoder is None: assert question_encoder_pretrained_model_name_or_path is not None, ( "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to" " be defined" ) from ..auto.modeling_auto import AutoModel if "config" not in kwargs_question_encoder: from ..auto.configuration_auto import AutoConfig question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained( question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder, return_unused_kwargs=True, ) kwargs_question_encoder["config"] = question_encoder_config question_encoder = AutoModel.from_pretrained( question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder ) generator = kwargs_generator.pop("model", None) if generator is None: assert generator_pretrained_model_name_or_path is not None, ( "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has" " to be defined" ) from ..auto.modeling_auto import AutoModelForSeq2SeqLM if "config" not in kwargs_generator: from ..auto.configuration_auto import AutoConfig generator_config, kwargs_generator = AutoConfig.from_pretrained( generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True ) kwargs_generator["config"] = generator_config generator = AutoModelForSeq2SeqLM.from_pretrained( generator_pretrained_model_name_or_path, **kwargs_generator ) # instantiate config with corresponding kwargs config = kwargs.get("config", None) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
class_definition
15,716
23,889
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_rag.py
null
6,179
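A hedged sketch of the keyword-prefix convention described in `from_pretrained_question_encoder_generator` above: `question_encoder_*` and `generator_*` arguments are routed to the respective sub-model configs, while unprefixed arguments go to the composite `RagConfig`. Checkpoint names come from the docstring example; the dropout and `n_docs` values are illustrative assumptions.

```python
# Minimal sketch: routing kwargs to the question encoder, the generator, and RagConfig.
from transformers import RagModel

model = RagModel.from_pretrained_question_encoder_generator(
    "facebook/dpr-question_encoder-single-nq-base",
    "google-t5/t5-small",
    question_encoder_attention_probs_dropout_prob=0.2,  # -> DPR question encoder config
    generator_dropout_rate=0.2,                         # -> T5 generator config
    n_docs=2,                                           # -> parent RagConfig
)

print(type(model.question_encoder).__name__, type(model.generator).__name__)
```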
class RagModel(RagPreTrainedModel): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, # or maybe just use a `set_retriever(...)` method **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an question_encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) else: assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}" super().__init__(config) if question_encoder is None: from ..auto.modeling_auto import AutoModel question_encoder = AutoModel.from_config(config.question_encoder) if generator is None: from ..auto.modeling_auto import AutoModelForSeq2SeqLM generator = AutoModelForSeq2SeqLM.from_config(config.generator) self.retriever = retriever if self.retriever is not None: assert isinstance( retriever, RagRetriever ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" self.retriever = retriever self.question_encoder = question_encoder self.generator = generator self.ctx_encoder = None self.context_encoder_training = False @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, doc_scores: Optional[torch.FloatTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, n_docs: Optional[int] = None, ) -> Union[Tuple[torch.Tensor], RetrievAugLMOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... 
) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"]) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved # whether retriever has to be used has_to_retrieve = ( self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and encoder_outputs is None ) # encoder_outputs are pre-computed during RAG-token generation if encoder_outputs is None: if has_to_retrieve: question_enc_outputs = self.question_encoder( input_ids, attention_mask=attention_mask, return_dict=True ) question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder retriever_outputs = self.retriever( input_ids, question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", ) if self.context_encoder_training: ( context_input_ids, context_attention_mask, retrieved_doc_embeds, retrived_doc_input_ids, retrived_doc_attention_mask, retrieved_doc_ids, ) = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["tokenized_doc_ids"], retriever_outputs["tokenized_doc_attention_mask"], retriever_outputs["doc_ids"], ) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) retrived_doc_input_ids = retrived_doc_input_ids.to(input_ids) retrived_doc_attention_mask = retrived_doc_attention_mask.to(input_ids) retrieved_doc_embeds = self.ctx_encoder( retrived_doc_input_ids, attention_mask=retrived_doc_attention_mask, return_dict=True ).pooler_output retrieved_doc_embeds = retrieved_doc_embeds.view( -1, n_docs, question_encoder_last_hidden_state.shape[1] ) # reshaping # compute doc_scores involving ctx_encoder doc_scores = torch.bmm( question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2) ).squeeze(1) else: context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["doc_ids"], ) # set to correct device retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm( question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2) ).squeeze(1) else: assert context_input_ids is not None, ( "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can" " set a retriever using the `set_retriever(...)` function." ) assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `retriever` is set. 
Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." ) assert ( doc_scores is not None ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function." assert (doc_scores.shape[1] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." ) # Decoder input without context documents if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0) gen_outputs = self.generator( input_ids=context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=True, ) if not has_to_retrieve: question_encoder_last_hidden_state = None question_enc_hidden_states = None question_enc_attentions = None retrieved_doc_embeds = None retrieved_doc_ids = None else: question_enc_hidden_states = question_enc_outputs.hidden_states question_enc_attentions = question_enc_outputs.attentions if not has_to_retrieve or not output_retrieved: # don't output retrieved docs context_input_ids = (None,) context_attention_mask = None retrieved_doc_embeds = None retrieved_doc_ids = None return RetrievAugLMOutput( logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, generator_cross_attentions=gen_outputs.cross_attentions, )
class_definition
31,012
42,438
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_rag.py
null
6,180
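A hedged sketch of the retrieval step that the `RagModel` forward pass above performs internally when a retriever is attached: encode the question, call the retriever, reproduce the batched inner-product `doc_scores`, and pass the pre-computed context tensors to `forward`. Checkpoint names follow the docstring examples; the dummy index keeps the sketch lightweight.

```python
# Minimal sketch: manual retrieval + doc_scores, mirroring the internal torch.bmm.
import torch
from transformers import AutoTokenizer, RagModel, RagRetriever

tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
)
model = RagModel.from_pretrained("facebook/rag-token-base")

inputs = tokenizer("How many people live in Paris?", return_tensors="pt")

question_hidden_states = model.question_encoder(inputs["input_ids"])[0]
docs = retriever(
    inputs["input_ids"].numpy(),
    question_hidden_states.detach().numpy(),
    return_tensors="pt",
)

# Same bmm as in the forward pass: (batch, 1, dim) x (batch, dim, n_docs) -> (batch, n_docs)
doc_scores = torch.bmm(
    question_hidden_states.unsqueeze(1),
    docs["retrieved_doc_embeds"].float().transpose(1, 2),
).squeeze(1)

outputs = model(
    context_input_ids=docs["context_input_ids"],
    context_attention_mask=docs["context_attention_mask"],
    doc_scores=doc_scores,
)
print(outputs.logits.shape, outputs.doc_scores.shape)
```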
class RagSequenceForGeneration(RagPreTrainedModel): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel): self.rag.context_encoder_training = True self.rag.ctx_encoder = ctx_encoder @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, exclude_bos_score: Optional[bool] = None, reduce_loss: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, n_docs: Optional[int] = None, **kwargs, # needs kwargs for generation ) -> RetrievAugLMMarginOutput: r""" exclude_bos_score (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing the loss. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum` operation. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt") >>> input_ids = inputs["input_ids"] >>> labels = targets["input_ids"] >>> outputs = model(input_ids=input_ids, labels=labels) >>> # or use retriever separately >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True) >>> # 1. Encode >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. 
Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") >>> doc_scores = torch.bmm( ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) ... ).squeeze(1) >>> # 3. Forward to generator >>> outputs = model( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=labels, ... ) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, ) loss = None if labels is not None: loss = self.get_nll( outputs.logits, outputs.doc_scores, decoder_input_ids, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, exclude_bos_score=exclude_bos_score, n_docs=n_docs, ) return RetrievAugLMMarginOutput( loss=loss, logits=outputs.logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions, ) @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, do_deduplication: Optional[bool] = None, # defaults to True num_return_sequences: Optional[int] = None, # defaults to 1 num_beams: Optional[int] = None, # defaults to 1 n_docs: Optional[int] = None, **model_kwargs, ) -> torch.LongTensor: """ Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation for more information on how to set other generate input parameters. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. 
If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are returned by [`~RagRetriever.__call__`]. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`]. do_deduplication (`bool`, *optional*): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. num_return_sequences(`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the `generator`'s `[`~generation.GenerationMixin.generate`]` function, where we set `num_return_sequences` to `num_beams`. num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. n_docs (`int`, *optional*, defaults to `config.n_docs`) Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs (`Dict[str, Any]`, *optional*): Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]. Return: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication num_doc_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) num_beams = num_beams if num_beams is not None else self.config.num_beams assert ( input_ids is not None or context_input_ids is not None ), " At least one of input_ids or context_input_ids must be given" if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] context_input_ids = self.retriever( input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", )["context_input_ids"] # set to correct device context_input_ids = context_input_ids.to(input_ids) hypos = [] model_kwargs["num_beams"] = num_beams model_kwargs["num_return_sequences"] = num_beams model_kwargs["attention_mask"] = None batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs for index in range(batch_size): # first, generate beams from documents: generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) output_sequences = self.generator.generate( generator_input_ids, **model_kwargs, ) # n_docs * n_beam, tgt_len if do_deduplication: # do_deduplication, max_output_len output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values())) num_candidates = output_sequences.shape[ 0 ] # after deduplication, this number can be less than n_docs*n_beam # then, run model forwards to get nll scores: if input_ids is not None: new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1) outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) else: # input_ids is None, need context_input_ids/mask and doc_scores assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." 
) individual_input_ids = generator_input_ids.repeat( num_candidates, 1 ) # (num_candidates*n_docs, max_len) individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1) individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs] outputs = self( context_input_ids=individual_input_ids, context_attention_mask=individual_attention_mask, doc_scores=individual_doc_scores, labels=output_sequences, exclude_bos_score=True, ) top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1] # add hypothesis hypos.append(output_sequences[top_cand_inds]) return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) def get_nll( self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None ): # shift tokens left target = torch.cat( [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1 ) n_docs = n_docs if n_docs is not None else self.config.n_docs # bos_token_id is None for T5 bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all() def _mask_pads(ll, smooth_obj): pad_mask = target.eq(self.config.generator.pad_token_id) if pad_mask.any(): ll.masked_fill_(pad_mask, 0.0) smooth_obj.masked_fill_(pad_mask, 0.0) return ll.squeeze(-1), smooth_obj.squeeze(-1) # seq_logits dim = (batch*n_docs, tgt_len , #vocabs) seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view( seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1) ) # batch_size x n_docs x tgt_len x #vocab_size doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1) # RAG-sequence marginalization first_token_scores = seq_logprobs[:, :, :1, :] second_token_scores = seq_logprobs[:, :, 1:2, :] remainder = seq_logprobs[:, :, 2:, :] rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2) # calculate loss target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1) assert target.dim() == rag_logprobs.dim() ll = rag_logprobs.gather(dim=-1, index=target) smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) # sum over tokens, exclude bos while scoring ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2) smooth_obj = smooth_obj.sum(2) ll = ll.logsumexp(1) # logsumexp over docs smooth_obj = smooth_obj.logsumexp(1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = nll_loss.sum() smooth_loss = smooth_loss.sum() eps_i = epsilon / rag_logprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss @staticmethod def _cat_and_pad(tensors, pad_token_id): output = ( tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id) ) ind = 0 for t in tensors: output[ind : ind + t.shape[0], : t.shape[1]] = t ind += t.shape[0] return output
class_definition
42,637
61,568
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_rag.py
null
6,181
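A hedged sketch of the "thorough" decoding loop implemented in `RagSequenceForGeneration.generate` above: candidate answers are generated per retrieved document, optionally deduplicated, re-scored with the sequence NLL, and the best hypotheses are returned. The checkpoint id comes from the docstring; the generation settings are illustrative assumptions.

```python
# Minimal sketch: RAG-sequence "thorough" decoding with its dedicated generate knobs.
from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

inputs = tokenizer("How many people live in Paris?", return_tensors="pt")

generated = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    num_beams=4,             # beams generated per retrieved document
    num_return_sequences=2,  # hypotheses kept after NLL re-scoring
    do_deduplication=True,   # drop identical candidates across documents
    n_docs=5,                # documents to retrieve for this call
    max_length=20,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```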
class RagTokenForGeneration(RagPreTrainedModel): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel): self.rag.context_encoder_training = True self.rag.ctx_encoder = ctx_encoder def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, doc_scores=None, n_docs=None, **kwargs, ): # Overwritten -- `do_marginalize` is explicitly set in the output if past_key_values is not None: # if past is defined use only last decoder_input_ids decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, "encoder_outputs": encoder_outputs, "doc_scores": doc_scores, "context_attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "past_key_values": past_key_values, "use_cache": use_cache, "do_marginalize": True, "n_docs": n_docs, } @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @staticmethod def _reorder_cache(past_key_values, beam_idx): """Reorders cache for generation. 
BART-inspired but we need to take care of the extra dimension for docs""" def _reorder_stacked(hidden_states, new_order): n_docs = hidden_states.shape[0] // new_order.shape[0] hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:]) hidden_states = hidden_states.index_select(0, new_order) result = hidden_states.view(-1, *hidden_states.shape[2:]) return result reordered_past = () for layer_past in past_key_values: # get the correct batch idx from decoder layer's batch dim for cross and self-attn reordered_past += ( tuple(_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past def marginalize(self, seq_logits, doc_scores, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # RAG-token marginalization seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view( seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1) ) doc_logprobs = torch.log_softmax(doc_scores, dim=1) log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1) return torch.logsumexp(log_prob_sum, dim=1) @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, do_marginalize: Optional[bool] = None, reduce_loss: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, n_docs: Optional[int] = None, **kwargs, # needs kwargs for generation ) -> RetrievAugLMMarginOutput: r""" do_marginalize (`bool`, *optional*): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum` operation. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt") >>> input_ids = inputs["input_ids"] >>> labels = targets["input_ids"] >>> outputs = model(input_ids=input_ids, labels=labels) >>> # or use retriever separately >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True) >>> # 1. 
Encode >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") >>> doc_scores = torch.bmm( ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) ... ).squeeze(1) >>> # 3. Forward to generator >>> outputs = model( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=labels, ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... ) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, ) loss = None logits = outputs.logits if labels is not None: assert decoder_input_ids is not None loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) if do_marginalize: logits = self.marginalize(logits, outputs.doc_scores, n_docs) return RetrievAugLMMarginOutput( loss=loss, logits=logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions, ) @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, n_docs: Optional[int] = None, generation_config: Optional[GenerationConfig] = None, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]] = None, logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(), stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(), **kwargs, ) -> 
torch.LongTensor: """ Implements RAG token decoding. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. n_docs (`int`, *optional*, defaults to `config.n_docs`): Number of documents to retrieve and/or number of documents for which to generate an answer. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constrains the beam search to allowed tokens only at each step. If not provided, no constraint is applied. This function takes 2 arguments `inputs_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens `inputs_ids` and the batch ID `batch_id`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logit processor is passed that is already created with the arguments or a model's config, an error is thrown.
stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a model's config. If a stopping criteria is passed that is already created with the arguments or a model's config an error is thrown. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. Return: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. """ # Handle `generation_config` and kwargs that might update it if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None self._prepare_special_tokens(generation_config, kwargs_has_attention_mask) # set default parameters n_docs = n_docs if n_docs is not None else self.config.n_docs # retrieve docs if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] out = self.retriever( input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) # set to correct device retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze( 1 ) assert (context_input_ids.shape[0] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." 
) # batch_size batch_size = context_input_ids.shape[0] // n_docs encoder = self.rag.generator.get_encoder() encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True) input_ids = torch.full( (batch_size * generation_config.num_beams, 1), generation_config.decoder_start_token_id, dtype=torch.long, device=next(self.parameters()).device, ) input_ids_seq_length = input_ids.shape[-1] last_hidden_state = encoder_outputs["last_hidden_state"] def extend_enc_output(tensor, num_beams=None): # split into `batch_size`, `num_beams`, `num_docs` tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:]) # repeat same last hidden states over `num_beams` dimension tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:]) # merge `batch_size`, `num_beams`, `num_docs` dims again return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:]) # correctly extend last_hidden_state and attention mask context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams) encoder_outputs["last_hidden_state"] = extend_enc_output( last_hidden_state, num_beams=generation_config.num_beams ) doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0) # define start_len & additional parameters model_kwargs["doc_scores"] = doc_scores model_kwargs["encoder_outputs"] = encoder_outputs model_kwargs["attention_mask"] = context_attention_mask model_kwargs["n_docs"] = n_docs pre_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, encoder_input_ids=context_input_ids, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, logits_processor=logits_processor, device=input_ids.device, ) prepared_stopping_criteria = self._get_stopping_criteria( generation_config=generation_config, stopping_criteria=stopping_criteria ) if generation_config.num_beams == 1: if generation_config.num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" " greedy search." 
) return self._sample( input_ids, logits_processor=pre_processor, stopping_criteria=prepared_stopping_criteria, generation_config=generation_config, synced_gpus=False, streamer=None, **model_kwargs, ) elif generation_config.num_beams > 1: if generation_config.num_return_sequences > generation_config.num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=generation_config.num_beams, device=self.device, length_penalty=generation_config.length_penalty, do_early_stopping=generation_config.early_stopping, num_beam_hyps_to_keep=generation_config.num_return_sequences, max_length=generation_config.max_length, ) return self._beam_search( input_ids, beam_scorer, logits_processor=pre_processor, stopping_criteria=prepared_stopping_criteria, generation_config=generation_config, synced_gpus=False, **model_kwargs, ) else: raise ValueError( f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}" ) def get_input_embeddings(self): return self.rag.generator.get_input_embeddings() def get_output_embeddings(self): return self.rag.generator.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.rag.generator.set_output_embeddings(new_embeddings) def shift_tokens_right(self, input_ids, start_token_id=None): """Shift input ids one token to the right, and pad with start_token_id""" if start_token_id is None: start_token_id = self.config.decoder_start_token_id shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = start_token_id return shifted_input_ids def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # shift tokens left target = torch.cat( [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1 ) def _mask_pads(ll, smooth_obj): pad_mask = target.eq(self.config.generator.pad_token_id) if pad_mask.any(): ll.masked_fill_(pad_mask, 0.0) smooth_obj.masked_fill_(pad_mask, 0.0) return ll.squeeze(-1), smooth_obj.squeeze(-1) rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) target = target.unsqueeze(-1) assert target.dim() == rag_logprobs.dim() ll = rag_logprobs.gather(dim=-1, index=target) smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) ll = ll.sum(1) # sum over tokens smooth_obj = smooth_obj.sum(1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = nll_loss.sum() smooth_loss = smooth_loss.sum() eps_i = epsilon / rag_logprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss
class_definition
61,761
86,275
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_rag.py
null
6,182
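`RagTokenForGeneration.marginalize` above implements the RAG-token objective: documents are marginalized at every decoding step rather than once per sequence. Here is a hedged, shape-level sketch of that per-step `logsumexp` with made-up sizes. Because the marginal is recomputed per step, the result can be fed directly to greedy or beam search, which is why `prepare_inputs_for_generation` forces `do_marginalize=True`.

```python
import torch

batch, n_docs, tgt_len, vocab = 2, 3, 4, 7                 # made-up sizes

seq_logits = torch.randn(batch * n_docs, tgt_len, vocab)
doc_scores = torch.randn(batch, n_docs)

seq_logprobs = seq_logits.log_softmax(-1).view(batch, n_docs, tgt_len, vocab)
doc_logprobs = doc_scores.log_softmax(dim=1)[:, :, None, None]  # broadcast over steps and vocab

# RAG-token: add the document log-prior at every step, then logsumexp over the
# document axis so each decoding step sees a single marginal distribution.
marginalized = torch.logsumexp(seq_logprobs + doc_logprobs, dim=1)

print(marginalized.shape)          # torch.Size([2, 4, 7])
print(marginalized.exp().sum(-1))  # each row sums to ~1.0: still a valid distribution
```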
class RagConfig(PretrainedConfig): model_type = "rag" is_composition = True def __init__( self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, dataset_revision=None, **kwargs, ): super().__init__( bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, ) if "question_encoder" not in kwargs or "generator" not in kwargs: raise ValueError( f"A configuration of type {self.model_type} cannot be instantiated because " f"both `question_encoder` and `generator` sub-configurations were not passed, only {kwargs}" ) question_encoder_config = kwargs.pop("question_encoder") question_encoder_model_type = question_encoder_config.pop("model_type") decoder_config = kwargs.pop("generator") decoder_model_type = decoder_config.pop("model_type") from ..auto.configuration_auto import AutoConfig self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config) self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config) self.reduce_loss = reduce_loss self.label_smoothing = label_smoothing self.exclude_bos_score = exclude_bos_score self.do_marginalize = do_marginalize self.title_sep = title_sep self.doc_sep = doc_sep self.n_docs = n_docs self.max_combined_length = max_combined_length self.dataset = dataset self.dataset_split = dataset_split self.index_name = index_name self.retrieval_vector_size = retrieval_vector_size self.retrieval_batch_size = retrieval_batch_size self.passages_path = passages_path self.index_path = index_path self.use_dummy_dataset = use_dummy_dataset self.dataset_revision = dataset_revision self.output_retrieved = output_retrieved self.do_deduplication = do_deduplication self.use_cache = use_cache if self.forced_eos_token_id is None: self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None) @classmethod def from_question_encoder_generator_configs( cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs ) -> PretrainedConfig: r""" Instantiate a [`RagConfig`] (or a derived class) from a pre-trained question encoder model configuration and a generator model configuration. Returns: [`RagConfig`]: An instance of a configuration object """ return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
class_definition
4,699
8,486
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/configuration_rag.py
null
6,183
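A small usage sketch of composing the configuration above from two sub-configurations via `from_question_encoder_generator_configs`. `DPRConfig` and `BartConfig` are just convenient stand-ins here; any question-encoder/generator config pair is wired in the same way.

```python
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()          # default DPR question encoder config
generator_config = BartConfig()                # default BART generator config

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config,
    generator_config,
    n_docs=5,                                  # documents retrieved per query
    max_combined_length=300,                   # truncation length for doc + question text
)

print(rag_config.generator.model_type)         # "bart"
print(rag_config.n_docs)                       # 5
```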
class TFRetrievAugLMMarginOutput(ModelOutput): """ Base class for retriever augmented marginalized models outputs. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`tf.Tensor` (int32) of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`tf.Tensor`(int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. 
generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None doc_scores: tf.Tensor | None = None retrieved_doc_embeds: tf.Tensor | None = None retrieved_doc_ids: tf.Tensor | None = None context_input_ids: tf.Tensor | None = None context_attention_mask: tf.Tensor | None = None question_encoder_last_hidden_state: tf.Tensor | None = None question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None question_enc_attentions: Tuple[tf.Tensor, ...] | None = None generator_enc_last_hidden_state: tf.Tensor | None = None generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None
class_definition
1,372
7,819
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_tf_rag.py
null
6,184
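The `doc_scores` field documented above is simply the inner product between the question embedding and each retrieved document embedding. A minimal sketch with invented shapes, shown with torch for brevity (the TF classes compute the same quantity with `tf.matmul`):

```python
import torch

batch, n_docs, hidden = 2, 5, 8                            # invented shapes

question_hidden_states = torch.randn(batch, hidden)        # question encoder pooled output
retrieved_doc_embeds = torch.randn(batch, n_docs, hidden)  # retriever output

doc_scores = torch.bmm(
    question_hidden_states.unsqueeze(1),                   # (batch, 1, hidden)
    retrieved_doc_embeds.transpose(1, 2),                  # (batch, hidden, n_docs)
).squeeze(1)                                               # (batch, n_docs)

print(doc_scores.shape)  # torch.Size([2, 5])
```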
class TFRetrievAugLMOutput(ModelOutput): """ Args: logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`tf.Tensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. 
generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None doc_scores: tf.Tensor | None = None retrieved_doc_embeds: tf.Tensor | None = None retrieved_doc_ids: tf.Tensor | None = None context_input_ids: tf.Tensor | None = None context_attention_mask: tf.Tensor | None = None question_encoder_last_hidden_state: tf.Tensor | None = None question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None question_enc_attentions: Tuple[tf.Tensor, ...] | None = None generator_enc_last_hidden_state: tf.Tensor | None = None generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None
class_definition
7,833
14,020
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_tf_rag.py
null
6,185
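Fields such as `context_input_ids` and `context_attention_mask` in the output above are flattened to `(batch_size * config.n_docs, config.max_combined_length)`; regrouping them per question is a single reshape. A tiny sketch with invented shapes:

```python
import torch

batch, n_docs, max_combined_length = 2, 5, 12              # invented shapes

# flattened layout used throughout the RAG code and output classes
context_input_ids = torch.randint(0, 100, (batch * n_docs, max_combined_length))

# regroup per question: (batch, n_docs, max_combined_length)
per_question = context_input_ids.view(batch, n_docs, max_combined_length)

print(per_question[0, 2])  # the third document retrieved for the first question
```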
class TFRagPreTrainedModel(TFPreTrainedModel): r""" RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al. RAG is a retriever augmented model and encapsulates three components: a question encoder, a dataset retriever and a generator. The encoder and generator are trainable while the retriever is just an indexed dataset. """ config_class = RagConfig base_model_prefix = "rag" _keys_to_ignore_on_load_missing = [r"position_ids"] @classmethod def from_pretrained_question_encoder_generator( cls, question_encoder_pretrained_model_name_or_path: str = None, generator_pretrained_model_name_or_path: str = None, retriever: RagRetriever = None, *model_args, **kwargs, ) -> TFPreTrainedModel: r""" Instantiates a question encoder and a generator from one or two base classes of the library from pretrained model checkpoints. Params: question_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the question encoder. Can be either: - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g., `google-bert/bert-base-uncased`. - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g., `dbmdz/bert-base-german-cased`. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pytorch index checkpoint file* (e.g., `./pt_model/`). In this case, `question_encoder_from_pt` should be set to `True`. generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the generator. Can be either: - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g., `google-t5/t5-small`. - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g., `facebook/bart-base`. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pytorch checkpoint file* (e.g., `./pt_model/`). In this case, `generator_from_pt` should be set to `True`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. retriever ([`RagRetriever`], *optional*): The retriever to use. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., `output_attentions=True`). - To update the question_encoder configuration, use the prefix *question_encoder_* for each configuration parameter. - To update the generator configuration, use the prefix *generator_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import RagRetriever, TFRagModel >>> # initialize a RAG from two pretrained models. >>> model = TFRagModel.from_pretrained_question_encoder_generator( ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small" ... ) >>> # alternatively, initializing from pytorch pretrained models can also be done >>> model = TFRagModel.from_pretrained_question_encoder_generator( ...
"facebook/dpr-question_encoder-single-nq-base", ... "facebook/bart-base", ... generator_from_pt=True, ... question_encoder_from_pt=True, ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./rag") >>> # load retriever >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... ) >>> # load fine-tuned model with retriever >>> model = TFRagModel.from_pretrained("./rag", retriever=retriever) ```""" kwargs_question_encoder = { argument[len("question_encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("question_encoder_") } kwargs_generator = { argument[len("generator_") :]: value for argument, value in kwargs.items() if argument.startswith("generator_") } # remove question_encoder, generator kwargs from kwargs for key in kwargs_question_encoder.keys(): del kwargs["question_encoder_" + key] for key in kwargs_generator.keys(): del kwargs["generator_" + key] # Load and initialize the question_encoder and generator # The distinction between question_encoder and generator at the model level is made # by the value of the flag `is_generator` that we need to set correctly. question_encoder = kwargs_question_encoder.pop("model", None) if question_encoder is None: assert question_encoder_pretrained_model_name_or_path is not None, ( "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to" " be defined" ) from ..auto.modeling_tf_auto import TFAutoModel if "config" not in kwargs_question_encoder: from ..auto.configuration_auto import AutoConfig question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path) kwargs_question_encoder["config"] = question_encoder_config question_encoder = TFAutoModel.from_pretrained( question_encoder_pretrained_model_name_or_path, name="question_encoder", load_weight_prefix=cls.load_weight_prefix, *model_args, **kwargs_question_encoder, ) generator = kwargs_generator.pop("generator", None) if generator is None: assert generator_pretrained_model_name_or_path is not None, ( "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has" " to be defined" ) from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM if "config" not in kwargs_generator: from ..auto.configuration_auto import AutoConfig generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path) kwargs_generator["config"] = generator_config generator = TFAutoModelForSeq2SeqLM.from_pretrained( generator_pretrained_model_name_or_path, name="generator", load_weight_prefix=cls.load_weight_prefix, **kwargs_generator, ) # instantiate config with corresponding kwargs config = kwargs.get("config", None) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
class_definition
14,023
22,226
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_tf_rag.py
null
6,186
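The `question_encoder_*` / `generator_*` kwargs routing performed by `from_pretrained_question_encoder_generator` above amounts to splitting a flat kwargs dict by prefix. A standalone sketch of that bookkeeping (the function name and example kwargs are illustrative, not part of the library):

```python
def split_prefixed_kwargs(kwargs, prefixes=("question_encoder_", "generator_")):
    """Route `prefix_xxx=value` entries into per-component dicts, leaving the rest untouched."""
    routed = {prefix: {} for prefix in prefixes}
    remaining = {}
    for key, value in kwargs.items():
        for prefix in prefixes:
            if key.startswith(prefix):
                routed[prefix][key[len(prefix):]] = value
                break
        else:
            remaining[key] = value
    return routed, remaining


routed, remaining = split_prefixed_kwargs(
    {"question_encoder_output_attentions": True, "generator_use_cache": False, "n_docs": 5}
)
print(routed["question_encoder_"])  # {'output_attentions': True}
print(routed["generator_"])         # {'use_cache': False}
print(remaining)                    # {'n_docs': 5}
```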
class TFRagModel(TFRagPreTrainedModel): load_weight_prefix = "tf_rag_model_1" def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, retriever: Optional[RagRetriever] = None, load_weight_prefix: Optional[str] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an question_encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) else: assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}" super().__init__(config, **kwargs) if question_encoder is None: from ..auto.modeling_tf_auto import TFAutoModel question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder") if generator is None: from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix generator = TFAutoModelForSeq2SeqLM.from_config( config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator" ) self.retriever = retriever if self.retriever is not None: assert isinstance( retriever, RagRetriever ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" self.retriever = retriever self.question_encoder = question_encoder self.generator = generator def set_retriever(self, retriever: RagRetriever): self.retriever = retriever @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_retrieved: bool | None = None, n_docs: int | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, ) -> TFRetrievAugLMOutput: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, TFRagModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... 
) >>> input_ids = input_dict["input_ids"] >>> outputs = model(input_ids) ```""" assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py # aliasing to minimize code changing n_docs = n_docs if n_docs is not None else self.config.n_docs # whether retriever has to be used has_to_retrieve = ( self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and encoder_outputs is None ) # encoder_outputs are pre-computed during RAG-token generation if encoder_outputs is None: if has_to_retrieve: question_enc_outputs = self.question_encoder( input_ids, attention_mask=attention_mask, return_dict=True, training=training ) # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91 question_encoder_last_hidden_state = question_enc_outputs[ 0 ] # hidden states of question encoder => pooler_output retriever_outputs = self.retriever( input_ids, question_encoder_last_hidden_state.numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["doc_ids"], ) context_input_ids = tf.cast(context_input_ids, tf.int32) context_attention_mask = tf.cast(context_attention_mask, tf.int32) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32) # compute doc_scores doc_scores = tf.squeeze( tf.matmul( tf.expand_dims(question_encoder_last_hidden_state, axis=1), retrieved_doc_embeds, transpose_b=True, ), axis=1, ) else: assert context_input_ids is not None, ( "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can" " set a retriever using the `set_retriever(...)` function." ) assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." ) assert ( doc_scores is not None ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function." assert (doc_scores.shape[1] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." 
) # Decoder input without context documents if decoder_input_ids is not None: decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0) if decoder_attention_mask is not None: decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0) gen_outputs = self.generator( context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, return_dict=True, training=training, ) if not has_to_retrieve: question_encoder_last_hidden_state = None question_enc_hidden_states = None question_enc_attentions = None retrieved_doc_embeds = None retrieved_doc_ids = None else: question_enc_hidden_states = question_enc_outputs.hidden_states question_enc_attentions = question_enc_outputs.attentions if not has_to_retrieve or not output_retrieved: # don't output retrieved docs context_input_ids = (None,) context_attention_mask = None retrieved_doc_embeds = None retrieved_doc_ids = None return TFRetrievAugLMOutput( logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True with tf.name_scope(self.generator.name): self.generator.build(None) with tf.name_scope(self.question_encoder.name): self.question_encoder.build(None)
class_definition
29,544
39,958
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_tf_rag.py
null
6,187
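Inside `TFRagModel.call` above, the decoder inputs are tiled once per retrieved document, so the generator scores every (question, document) pair in one flattened batch. A shape-level sketch, shown with torch's `repeat_interleave` for brevity (the TF code uses `tf.repeat` to the same effect):

```python
import torch

batch, n_docs, tgt_len = 2, 3, 4                            # invented sizes

decoder_input_ids = torch.arange(batch * tgt_len).view(batch, tgt_len)

# one copy of each question's decoder inputs per retrieved document
tiled = decoder_input_ids.repeat_interleave(n_docs, dim=0)  # (batch * n_docs, tgt_len)

print(tiled.shape)      # torch.Size([6, 4])
print(tiled[:n_docs])   # the first question's inputs, repeated n_docs times
```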
class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): load_weight_prefix = "tf_rag_token_for_generation_1/rag" def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = TFRagModel( config=config, question_encoder=question_encoder, generator=generator, retriever=retriever, load_weight_prefix=self.load_weight_prefix, name="rag", ) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, doc_scores=None, n_docs=None, **kwargs, ): if past_key_values is not None: # if past is defined use only last decoder_input_ids decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, "encoder_outputs": encoder_outputs, "doc_scores": doc_scores, "context_attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "past_key_values": past_key_values, "use_cache": use_cache, "do_marginalize": True, "n_docs": n_docs, } @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @staticmethod def _gather_beams(nested, beam_indices, batch_axis=0): """ RAG-specific `_gather_beams`: gathers the beam slices indexed by beam_indices into new beam array. If the nested tensor has a shape mismatch with the beam indices, then it means it is the cache. In that case, isolates and takes care of the extra dimension for ndocs. 
""" def gather_fn(tensor): is_rag_cache = tensor.shape[0] != beam_indices.shape[0] if is_rag_cache: n_docs = tensor.shape[0] // beam_indices.shape[0] batch_size = beam_indices.shape[0] # reshapes into (batch size, num beams, n_docs, ...), the cache format expected by RAG tensor = tf.reshape(tensor, (batch_size, -1, n_docs, *tensor.shape[2:])) gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1) if is_rag_cache: # reshapes back into the shape expected by beam search gathered_tensor = tf.reshape(gathered_tensor, (batch_size * n_docs, -1, *gathered_tensor.shape[3:])) return gathered_tensor return tf.nest.map_structure(gather_fn, nested) def marginalize(self, seq_logits, doc_scores, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # RAG-token marginalization seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]]) doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # twice log_prob_sum = seq_logprobs + doc_logprobs return tf.reduce_logsumexp(log_prob_sum, axis=1) @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_retrieved: bool | None = None, n_docs: int | None = None, do_marginalize: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, reduce_loss: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, # needs kwargs for generation ) -> TFRetrievAugLMMarginOutput: r""" do_marginalize (`bool`, *optional*): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss according to Rag-Token model formulation See https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Token formulation. Indices should be in `[0, ..., config.vocab_size - 1]`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum` operation. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, RagRetriever, TFRagTokenForGeneration >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True ... 
) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... ) >>> outputs = model(input_dict, output_retrieved=True) >>> # or use retriever separately >>> # 1. Encode >>> input_ids = input_dict["input_ids"] >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") >>> doc_scores = tf.squeeze( ... tf.matmul( ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True ... ), ... axis=1, ... ) >>> # 3. Forward to generator >>> outputs = model( ... inputs=None, ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=input_dict["labels"], ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... ) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py do_marginalize = do_marginalize if do_marginalize else self.config.do_marginalize reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, training=training, ) loss = None logits = outputs.logits if labels is not None: assert decoder_input_ids is not None loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) if do_marginalize: logits = self.marginalize(logits, outputs.doc_scores, n_docs) return TFRetrievAugLMMarginOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, doc_scores=outputs.doc_scores, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, ) def generate( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = 
None, context_input_ids=None, context_attention_mask=None, doc_scores=None, n_docs=None, generation_config=None, logits_processor=TFLogitsProcessorList(), **kwargs, ): """ Implements TFRAG token decoding. Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. n_docs (`int`, *optional*, defaults to `config.n_docs`): Number of documents to retrieve and/or number of documents for which to generate an answer. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`TFLogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logit processor is passed that is already created with the arguments or a model's config, an error is thrown. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. Return: `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`.
""" # Handle `generation_config` and kwargs that might update it if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs # set default parameters n_docs = n_docs if n_docs is not None else self.config.n_docs # retrieve docs if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] out = self.retriever( input_ids, question_hidden_states.numpy().astype(np.float32), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) context_input_ids = tf.cast(context_input_ids, tf.int32) context_attention_mask = tf.cast(context_attention_mask, tf.int32) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) # compute doc_scores doc_scores = tf.matmul( tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True ) doc_scores = tf.squeeze(doc_scores, axis=1) assert (context_input_ids.shape[0] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." ) batch_size = context_input_ids.shape[0] // n_docs encoder = self.rag.generator.get_encoder() encoder_outputs = encoder( input_ids=context_input_ids, attention_mask=context_attention_mask, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, return_dict=True, ) decoder_input_ids = tf.fill( (batch_size * generation_config.num_beams, 1), tf.cast(generation_config.decoder_start_token_id, tf.int32), ) last_hidden_state = encoder_outputs["last_hidden_state"] def extend_enc_output(tensor, num_beams=None): """ Broadcast tensor with `num_beams` replica, with correct order Input: tensor of shape (batch_size*n_docs , d) Output: tensor of shape (batch_size*num_beams*n_docs , d) """ # expand batch_size & num_beam dimensions d_shape_list = tensor.shape[1:] # split n_docs dimensions new_shape = (batch_size, 1, n_docs) + d_shape_list tensor = tf.reshape(tensor, new_shape) # repeat same last hidden states over `num_beams` dimension new_shape = (batch_size, num_beams, n_docs) + d_shape_list tensor = tf.broadcast_to(tensor, new_shape) # merge `batch_size`, `num_beams`, `num_docs` dims again new_shape = (batch_size * num_beams * n_docs,) + d_shape_list return tf.reshape(tensor, new_shape) # correctly extend last_hidden_state and attention mask context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams) encoder_outputs["last_hidden_state"] = extend_enc_output( last_hidden_state, num_beams=generation_config.num_beams ) doc_scores = tf.repeat(doc_scores, generation_config.num_beams, axis=0) # define start_len & additional parameters model_kwargs["doc_scores"] = doc_scores model_kwargs["encoder_outputs"] = encoder_outputs model_kwargs["attention_mask"] = context_attention_mask model_kwargs["n_docs"] = n_docs pre_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=tf.shape(decoder_input_ids)[-1], logits_processor=logits_processor, ) if generation_config.num_beams == 1: return self.greedy_search( input_ids=decoder_input_ids, max_length=generation_config.max_length, 
pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, logits_processor=pre_processor, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif generation_config.num_beams > 1: if generation_config.num_beams < generation_config.num_return_sequences: raise ValueError( "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" f" num_return_sequences, got {generation_config.num_beams} and" f" {generation_config.num_return_sequences} (respectively)" ) def unflatten_beam_dim(tensor): """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" shape = shape_list(tensor) return tf.reshape(tensor, [-1, generation_config.num_beams] + shape[1:]) decoder_input_ids = unflatten_beam_dim(decoder_input_ids) model_kwargs["attention_mask"] = unflatten_beam_dim(model_kwargs["attention_mask"]) model_kwargs["encoder_outputs"]["last_hidden_state"] = unflatten_beam_dim( model_kwargs["encoder_outputs"]["last_hidden_state"] ) return self.beam_search( input_ids=decoder_input_ids, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, logits_processor=pre_processor, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) else: raise ValueError( f"`num_beams` has to be a strictly positive integer (≥ 1), but is {generation_config.num_beams}" ) def get_input_embeddings(self): return self.rag.generator.get_input_embeddings() def get_output_embeddings(self): return self.rag.generator.get_output_embeddings() # Adapted from tf_t5's & tf_bart's _shift_right def shift_tokens_right(self, input_ids, start_token_id=None): """Shift input ids one token to the right, and pad with start_token_id""" if start_token_id is None: start_token_id = self.generator.config.decoder_start_token_id assert start_token_id is not None, ( "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as" " generator, see Bart docs for more information" ) pad_token_id = self.generator.config.pad_token_id assert pad_token_id is not None, "self.generator.config.pad_token_id has to be defined." 
start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.cast(start_token_id, input_ids.dtype)) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.cast(pad_token_id, input_ids.dtype)), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, shifted_input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids # nll stands for 'negative log likelihood' def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # shift tokens left (from original Pytorch's version) target = tf.concat( [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))], axis=1, ) rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss) return loss # Adopted modeling_tf_bart + add smooth_loss to match with pytorch version def hf_compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False): """CrossEntropyLoss that ignores pad tokens""" # Matt: As written, this loss is not XLA-compatible, but it's doing some very weird things # and I don't feel comfortable converting it. loss_fn = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.SUM, ) if from_logits is False: # convert to logits eps = 1e-9 y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps) y_pred = tf.math.log(y_pred) logits = y_pred melted_labels = tf.reshape(labels, (-1,)) active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss) labels = tf.boolean_mask(melted_labels, active_loss) nll_loss = loss_fn(labels, reduced_logits) smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1) smooth_loss = tf.reduce_sum(smooth_loss) # sum and squeeze like torch eps_i = smooth_epsilon / reduced_logits.shape[-1] loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss return loss def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "rag", None) is not None: with tf.name_scope(self.rag.name): self.rag.build(None)
class_definition
40,154
66,857
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_tf_rag.py
null
6,188
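The `extend_enc_output` helper inside `TFRagTokenForGeneration.generate` above is the core bookkeeping trick of RAG token generation: encoder states computed once per (question, document) pair are tiled across beams while keeping the `n_docs` grouping contiguous. Below is a minimal standalone sketch of that reshape/broadcast; all shapes are toy values chosen for illustration, not library code.

```python
# Sketch of the beam-tiling reshape used in TFRagTokenForGeneration.generate(); shapes are illustrative.
import tensorflow as tf

batch_size, n_docs, num_beams, seq_len, d = 2, 3, 4, 5, 8
last_hidden_state = tf.random.normal((batch_size * n_docs, seq_len, d))

def extend_enc_output(tensor, num_beams):
    tail = tuple(tensor.shape[1:])
    tensor = tf.reshape(tensor, (batch_size, 1, n_docs) + tail)               # split off the n_docs axis
    tensor = tf.broadcast_to(tensor, (batch_size, num_beams, n_docs) + tail)  # repeat the same states per beam
    return tf.reshape(tensor, (batch_size * num_beams * n_docs,) + tail)      # flatten back for the decoder

extended = extend_enc_output(last_hidden_state, num_beams)
print(extended.shape)  # (batch_size * num_beams * n_docs, seq_len, d) == (24, 5, 8)
```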
class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): load_weight_prefix = "tf_rag_sequence_for_generation_1/rag" def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = TFRagModel( config=config, question_encoder=question_encoder, generator=generator, retriever=retriever, load_weight_prefix=self.load_weight_prefix, name="rag", ) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, n_docs: Optional[int] = None, exclude_bos_score: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, reduce_loss: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, **kwargs, # needs kwargs for generation ) -> Union[Tuple[tf.Tensor], TFRetrievAugLMMarginOutput]: r""" exclude_bos_score (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing the loss. labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation See https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Sequence formulation. Indices should be in `[0, ..., config.vocab_size - 1]`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum` operation. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, TFRagSequenceForGeneration >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ... 
) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagSequenceForGeneration.from_pretrained( ... "facebook/rag-sequence-nq", retriever=retriever, from_pt=True ... ) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... ) >>> outputs = model(input_dict, output_retrieved=True) >>> # or use retriever separately >>> # 1. Encode >>> input_ids = input_dict["input_ids"] >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") >>> doc_scores = tf.squeeze( ... tf.matmul( ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True ... ), ... axis=1, ... ) >>> # 3. Forward to generator >>> outputs = model( ... inputs=None, ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=input_dict["labels"], ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... ) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" assert ( "decoder_cached_states" not in kwargs ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py exclude_bos_score = exclude_bos_score if exclude_bos_score else self.config.exclude_bos_score reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, training=training, ) loss = None if labels is not None: loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) return TFRetrievAugLMMarginOutput( loss=loss, logits=outputs.logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, ) def get_nll( self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None ): # shift tokens left target = tf.concat( [target[:, 1:], tf.fill([target.shape[0], 
1], tf.cast(self.config.generator.pad_token_id, target.dtype))], axis=1, ) # bos_token_id is None for T5 bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id n_docs = n_docs if n_docs is not None else self.config.n_docs equal_bos_token_id_all = tf.reduce_all(tf.equal(target[:, 0], bos_token_id)) use_bos = bos_token_id is not None and equal_bos_token_id_all def _mask_pads(ll, smooth_obj): pad_mask = tf.equal(target, tf.cast(self.config.generator.pad_token_id, target.dtype)) if tf.reduce_any(pad_mask): ll = tf.where(pad_mask, 0.0, ll) smooth_obj = tf.where(pad_mask, 0.0, smooth_obj) return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1) # seq_logits.shape = (batch*n_docs, tgt_len , vocabs) seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) seq_logprobs = tf.reshape( seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]) ) # (batch_size, n_docs, tgt_len, vocabs) doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # done twice to get 4-D # RAG-sequence marginalization first_token_scores = seq_logprobs[:, :, :1, :] second_token_scores = seq_logprobs[:, :, 1:2, :] remainder = seq_logprobs[:, :, 2:, :] rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2) # calculate loss target = tf.expand_dims(target, axis=1) # n_docs dimension target = tf.expand_dims(target, axis=-1) # logits dimension target = tf.repeat(target, n_docs, axis=1) assert len(target.shape) == len(rag_logprobs.shape) # last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering def torch_gather(param, id_tensor): # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather def gather2d(target, id_tensor): idx = tf.stack([tf.range(tf.shape(id_tensor)[0], dtype=id_tensor.dtype), id_tensor[:, 0]], axis=-1) result = tf.gather_nd(target, idx) return tf.expand_dims(result, axis=-1) target = tf.reshape(param, (-1, param.shape[-1])) # reshape 2D target_shape = id_tensor.shape id_tensor = tf.reshape(id_tensor, (-1, 1)) # also 2D-index result = gather2d(target, id_tensor) return tf.reshape(result, target_shape) ll = torch_gather(rag_logprobs, id_tensor=target) smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) # sum over tokens, exclude bos while scoring if exclude_bos_score and use_bos: ll = tf.reduce_sum(ll[:, :, 1:], axis=2) else: ll = tf.reduce_sum(ll, axis=2) smooth_obj = tf.reduce_sum(smooth_obj, axis=2) ll = tf.math.reduce_logsumexp(ll, axis=1) # logsumexp over docs smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = tf.reduce_sum(nll_loss) smooth_loss = tf.reduce_sum(smooth_loss) eps_i = epsilon / rag_logprobs.shape[-1] loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss def generate( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, context_input_ids=None, context_attention_mask=None, doc_scores=None, do_deduplication=None, # defaults to True num_return_sequences=None, # defaults to 1 num_beams=None, # defaults to 1 n_docs=None, **model_kwargs, ): """ Implements RAG sequence "thorough" decoding. 
Read the [`~generation.GenerationMixin.generate`] documentation for more information on how to set other generate input parameters. Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are returned by [`~RagRetriever.__call__`]. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`]. do_deduplication (`bool`, *optional*): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with a distributed backend. num_return_sequences (`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function, where we set `num_return_sequences` to `num_beams`. num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. n_docs (`int`, *optional*, defaults to `config.n_docs`): Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs (`Dict[str, Any]`, *optional*): Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]. Return: `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication num_doc_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) num_beams = num_beams if num_beams is not None else self.config.num_beams assert ( input_ids is not None or context_input_ids is not None ), " At least one of input_ids or context_input_ids must be given" if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] context_input_ids = self.retriever( input_ids, question_hidden_states.numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", )["context_input_ids"] hypos = [] model_kwargs["num_beams"] = num_beams model_kwargs["num_return_sequences"] = num_beams # put here so that not confused with num_doc_return_sequences model_kwargs["attention_mask"] = None batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs for index in range(batch_size): # first, generate beams from documents: generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) output_sequences = self.generator.generate( generator_input_ids, **model_kwargs, ) # n_docs * n_beam, tgt_len if do_deduplication: # do_deduplication -- for TF, work on Eager mode only! output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values())) num_candidates = output_sequences.shape[ 0 ] # after deduplication, this number can be less than n_docs*n_beam # then, run model forwards to get nll scores: if input_ids is not None: new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1)) outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) else: # input_ids is None, need context_input_ids/mask and doc_scores assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." 
) individual_input_ids = tf.tile( generator_input_ids, (num_candidates, 1) ) # (num_candidates*n_docs, max_len) individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1)) individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1)) # [num_candidates, n_docs] outputs = self( input_ids=None, context_input_ids=individual_input_ids, context_attention_mask=individual_attention_mask, doc_scores=individual_doc_scores, labels=output_sequences, exclude_bos_score=True, ) top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1] # add hypothesis hypos.append(tf.gather(output_sequences, top_cand_inds)) return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) @staticmethod def _cat_and_pad(tensors, pad_token_id): # used by generate(): tensors is a (batched) list of (candidates, len); len is varied across batch # Initialize padded tensor with shape ( all_candidates , max_candidate_length ), # where all_candidates counted from all inputs new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors]) output = tf.fill(new_shape, pad_token_id) # Normal tensor doesn't support slice assignment, so we need tf.Variable output = tf.Variable(output) # Assign, and then convert back to tensor ind = 0 for t in tensors: output[ind : ind + t.shape[0], : t.shape[1]].assign(t) ind += t.shape[0] output = tf.convert_to_tensor(output) return tf.cast(output, tensors[0][0][0].dtype) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "rag", None) is not None: with tf.name_scope(self.rag.name): self.rag.build(None)
class_definition
67,059
88,808
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/modeling_tf_rag.py
null
6,189
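`get_nll` in the class above implements the RAG-Sequence objective: the document posterior log p(d|x) is added once per candidate sequence (at the second token position), the token log-probabilities are summed over the sequence, and the result is log-sum-exp'd over the retrieved documents. The NumPy sketch below reproduces that marginalization with toy shapes; it is illustrative only and not the library's loss code.

```python
# Toy RAG-Sequence marginalization: logsumexp_d [ log p(d|x) + sum_t log p(y_t | x, d, y_<t) ].
import numpy as np

batch_size, n_docs, tgt_len, vocab = 2, 3, 4, 10
rng = np.random.default_rng(0)

seq_logprobs = np.log(rng.dirichlet(np.ones(vocab), size=(batch_size, n_docs, tgt_len)))  # log p(y_t | x, d, y_<t)
doc_logprobs = np.log(rng.dirichlet(np.ones(n_docs), size=batch_size))                    # log p(d | x)
target = rng.integers(0, vocab, size=(batch_size, tgt_len))

# gather the log-prob of each target token, per document
token_ll = np.take_along_axis(seq_logprobs, target[:, None, :, None], axis=-1).squeeze(-1)  # (batch, n_docs, tgt_len)
per_doc_ll = doc_logprobs + token_ll.sum(axis=-1)   # add log p(d|x) once per sequence
seq_ll = np.logaddexp.reduce(per_doc_ll, axis=1)    # marginalize over the retrieved documents
nll_loss = -seq_ll.sum()                            # reduce_loss=True behaviour
print(nll_loss)
```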
class Index: """ A base class for the Indices encapsulated by the [`RagRetriever`]. """ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: """ Returns a list of dictionaries, containing titles and text of the retrieved documents. Args: doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`): A tensor of document indices. """ raise NotImplementedError def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: """ For each query in the batch, retrieves `n_docs` documents. Args: question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): An array of query vectors. n_docs (`int`): The number of docs retrieved per query. Returns: `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of shape `(batch_size, n_docs, vector_size)`: A tensor of vector representations of retrieved documents. """ raise NotImplementedError def is_initialized(self): """ Returns `True` if the index is already initialized. """ raise NotImplementedError def init_index(self): """ A function responsible for loading the index into memory. Should be called only once per training run of a RAG model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load the index. """ raise NotImplementedError
class_definition
1,351
2,989
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/retrieval_rag.py
null
6,190
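To make the abstract contract above concrete, here is a minimal in-memory subclass written purely for illustration (the class name, brute-force scoring, and toy data handling are not part of the library):

```python
# Illustrative brute-force implementation of the Index interface above.
import numpy as np
from typing import List, Tuple

class InMemoryIndex(Index):
    def __init__(self, titles: List[str], texts: List[str], embeddings: np.ndarray):
        self.titles, self.texts, self.embeddings = titles, texts, embeddings
        self._initialized = False

    def init_index(self):
        self._initialized = True  # nothing to load for an in-memory toy index

    def is_initialized(self):
        return self._initialized

    def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
        return [
            {"title": [self.titles[i] for i in row], "text": [self.texts[i] for i in row]}
            for row in doc_ids
        ]

    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
        scores = question_hidden_states @ self.embeddings.T   # inner-product relevance scores
        ids = np.argsort(-scores, axis=1)[:, :n_docs]         # best n_docs per query
        vectors = self.embeddings[ids]                        # (batch_size, n_docs, vector_size)
        return ids, vectors
```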
class LegacyIndex(Index): """ An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use default faiss index parameters as specified in that repository. Args: vector_size (`int`): The dimension of indexed vectors. index_path (`str`): A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`] """ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index" PASSAGE_FILENAME = "psgs_w100.tsv.pkl" def __init__(self, vector_size, index_path): self.index_id_to_db_id = [] self.index_path = index_path self.passages = self._load_passages() self.vector_size = vector_size self.index = None self._index_initialized = False def _resolve_path(self, index_path, filename): is_local = os.path.isdir(index_path) try: # Load from URL or cache if already cached resolved_archive_file = cached_file(index_path, filename) except EnvironmentError: msg = ( f"Can't load '{filename}'. Make sure that:\n\n" f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n" f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n" ) raise EnvironmentError(msg) if is_local: logger.info(f"loading file {resolved_archive_file}") else: logger.info(f"loading file {filename} from cache at {resolved_archive_file}") return resolved_archive_file def _load_passages(self): logger.info(f"Loading passages from {self.index_path}") passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME) if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")): raise ValueError( "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially " "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or " "that could have been tampered with. If you already verified the pickle data and decided to use it, " "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it." ) with open(passages_path, "rb") as passages_file: passages = pickle.load(passages_file) return passages def _deserialize_index(self): logger.info(f"Loading index from {self.index_path}") resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr") self.index = faiss.read_index(resolved_index_path) resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr") if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")): raise ValueError( "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially " "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or " "that could have been tampered with. If you already verified the pickle data and decided to use it, " "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it." 
) with open(resolved_meta_path, "rb") as metadata_file: self.index_id_to_db_id = pickle.load(metadata_file) assert ( len(self.index_id_to_db_id) == self.index.ntotal ), "Deserialized index_id_to_db_id should match faiss index size" def is_initialized(self): return self._index_initialized def init_index(self): index = faiss.IndexHNSWFlat(self.vector_size + 1, 512) index.hnsw.efSearch = 128 index.hnsw.efConstruction = 200 self.index = index self._deserialize_index() self._index_initialized = True def get_doc_dicts(self, doc_ids: np.array): doc_list = [] for doc_ids_i in doc_ids: ids = [str(int(doc_id)) for doc_id in doc_ids_i] docs = [self.passages[doc_id] for doc_id in ids] doc_list.append(docs) doc_dicts = [] for docs in doc_list: doc_dict = {} doc_dict["title"] = [doc[1] for doc in docs] doc_dict["text"] = [doc[0] for doc in docs] doc_dicts.append(doc_dict) return doc_dicts def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1) query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim)) _, docs_ids = self.index.search(query_nhsw_vectors, n_docs) vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids] ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids] return np.array(ids), np.array(vectors)
class_definition
2,992
8,230
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/retrieval_rag.py
null
6,191
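The `vector_size + 1` index dimension and the zero auxiliary column appended to queries in `LegacyIndex.get_top_docs` come from the DPR trick of turning maximum-inner-product search into L2 search for an HNSW index: passages are stored with an extra component that encodes their norm, queries get a 0. The NumPy sketch below demonstrates the idea with random data; it is a standalone illustration, not the faiss code path used above.

```python
# Why appending one auxiliary dimension makes L2 ranking match inner-product ranking.
import numpy as np

rng = np.random.default_rng(0)
passages = rng.normal(size=(1000, 128))
queries = rng.normal(size=(4, 128))

phi = (passages ** 2).sum(axis=1).max()
aux_p = np.sqrt(phi - (passages ** 2).sum(axis=1, keepdims=True))      # per-passage auxiliary value (what gets indexed)
passages_aug = np.hstack([passages, aux_p])                            # dimension 128 + 1, as in the serialized index
queries_aug = np.hstack([queries, np.zeros((len(queries), 1))])        # what LegacyIndex does at query time

# ||q_aug - p_aug||^2 = ||q||^2 + phi - 2 q.p, so the L2-nearest passage is the max-inner-product passage.
l2 = ((queries_aug[:, None, :] - passages_aug[None, :, :]) ** 2).sum(-1)
ip = queries @ passages.T
assert (np.argsort(l2, axis=1)[:, 0] == np.argsort(-ip, axis=1)[:, 0]).all()
print("nearest-by-L2 matches best-by-inner-product")
```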
class HFIndexBase(Index): def __init__(self, vector_size, dataset, index_initialized=False): self.vector_size = vector_size self.dataset = dataset self._index_initialized = index_initialized self._check_dataset_format(with_index=index_initialized) dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32") def _check_dataset_format(self, with_index: bool): if not isinstance(self.dataset, Dataset): raise TypeError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}") if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0: raise ValueError( "Dataset should be a dataset with the following columns: " "title (str), text (str) and embeddings (arrays of dimension vector_size), " f"but got columns {self.dataset.column_names}" ) if with_index and "embeddings" not in self.dataset.list_indexes(): raise ValueError( "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it " "or `dataset.load_faiss_index` to load one from the disk." ) def init_index(self): raise NotImplementedError() def is_initialized(self): return self._index_initialized def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])] def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs) docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids] vectors = [doc["embeddings"] for doc in docs] for i in range(len(vectors)): if len(vectors[i]) < n_docs: vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))]) return np.array(ids), np.array(vectors) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
class_definition
8,233
10,410
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/retrieval_rag.py
null
6,192
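One detail of `HFIndexBase.get_top_docs` worth spelling out: faiss can return `-1` ids when fewer than `n_docs` neighbours exist, so the method filters those out and zero-pads the embeddings to keep a rectangular block per query. A toy sketch of just that branch (illustrative values):

```python
# Padding behaviour of HFIndexBase.get_top_docs() when a query has fewer than n_docs hits.
import numpy as np

n_docs, vector_size = 4, 8
ids = np.array([3, 7, -1, -1])                             # two real hits, two missing
vectors = np.random.rand(int((ids >= 0).sum()), vector_size)
if len(vectors) < n_docs:
    vectors = np.vstack([vectors, np.zeros((n_docs - len(vectors), vector_size))])
print(vectors.shape)  # (4, 8) -- always (n_docs, vector_size)
```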
class CanonicalHFIndex(HFIndexBase): """ A wrapper around an instance of [`~datasets.Dataset`]. If `index_path` is set to `None`, we load the pre-computed index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path on disk. Args: vector_size (`int`): The dimension of the passage embeddings used by the index. dataset_name (`str`, optional, defaults to `wiki_dpr`): A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids with `datasets.list_datasets()`). dataset_split (`str`, optional, defaults to `train`): Which split of the `dataset` to load. index_name (`str`, optional): The name of the index associated with the `dataset`. The index loaded from `index_path` will be saved under this name. index_path (`str`, optional, defaults to `None`): The path to the serialized faiss index on disk. use_dummy_dataset (`bool`, optional, defaults to `False`): If `True`, use the dummy configuration of the dataset for tests. """ def __init__( self, vector_size: int, dataset_name: str = "wiki_dpr", dataset_split: str = "train", index_name: Optional[str] = None, index_path: Optional[str] = None, use_dummy_dataset=False, dataset_revision=None, ): if int(index_path is None) + int(index_name is None) != 1: raise ValueError("Please provide exactly one of `index_name` or `index_path`.") self.dataset_name = dataset_name self.dataset_split = dataset_split self.index_name = index_name self.index_path = index_path self.use_dummy_dataset = use_dummy_dataset self.dataset_revision = dataset_revision logger.info(f"Loading passages from {self.dataset_name}") dataset = load_dataset( self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset, revision=dataset_revision, ) super().__init__(vector_size, dataset, index_initialized=False) def init_index(self): if self.index_path is not None: logger.info(f"Loading index from {self.index_path}") self.dataset.load_faiss_index("embeddings", file=self.index_path) else: logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}") self.dataset = load_dataset( self.dataset_name, with_embeddings=True, with_index=True, split=self.dataset_split, index_name=self.index_name, dummy=self.use_dummy_dataset, revision=self.dataset_revision, ) self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True) self._index_initialized = True
class_definition
10,413
13,474
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/retrieval_rag.py
null
6,193
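A hedged usage sketch of the class above, mirroring how `RagRetriever` builds it from its config: the dummy `wiki_dpr` configuration keeps the download small, which is why it is used in tests. Depending on your `datasets` version, loading the `wiki_dpr` script may require extra arguments (e.g. trusting remote code), so treat this as a sketch rather than a guaranteed recipe.

```python
# Assumed: a working `datasets`/`faiss` install and access to the wiki_dpr dataset script.
import numpy as np

index = CanonicalHFIndex(
    vector_size=768,
    dataset_name="wiki_dpr",
    dataset_split="train",
    index_name="exact",
    use_dummy_dataset=True,
)
index.init_index()

question_hidden_states = np.random.rand(1, 768).astype("float32")
doc_ids, doc_vectors = index.get_top_docs(question_hidden_states, n_docs=2)
docs = index.get_doc_dicts(doc_ids)  # [{"title": [...], "text": [...], ...}]
```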
class CustomHFIndex(HFIndexBase): """ A wrapper around an instance of [`~datasets.Dataset`]. The dataset and the index are both loaded from the indicated paths on disk. Args: vector_size (`int`): The dimension of the passage embeddings used by the index. dataset_path (`str`): The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and embeddings (arrays of dimension vector_size). index_path (`str`): The path to the serialized faiss index on disk. """ def __init__(self, vector_size: int, dataset, index_path=None): super().__init__(vector_size, dataset, index_initialized=index_path is None) self.index_path = index_path @classmethod def load_from_disk(cls, vector_size, dataset_path, index_path): logger.info(f"Loading passages from {dataset_path}") if dataset_path is None or index_path is None: raise ValueError( "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` " "and `dataset.get_index('embeddings').save(index_path)`." ) dataset = load_from_disk(dataset_path) return cls(vector_size=vector_size, dataset=dataset, index_path=index_path) def init_index(self): if not self.is_initialized(): logger.info(f"Loading index from {self.index_path}") self.dataset.load_faiss_index("embeddings", file=self.index_path) self._index_initialized = True
class_definition
13,477
15,068
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/retrieval_rag.py
null
6,194
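A hedged end-to-end sketch of producing the two paths `CustomHFIndex.load_from_disk` expects. The tiny random embeddings and the file names (`my_dataset`, `my_index.faiss`) are placeholders; the only requirements coming from the code above are the `title`/`text`/`embeddings` columns, a saved dataset, and a separately saved faiss index.

```python
# Build a small knowledge dataset, index it, save both pieces, then reload via CustomHFIndex.
import numpy as np
from datasets import Dataset, Features, Sequence, Value

data = {
    "title": ["Paris", "Berlin"],
    "text": ["Paris is the capital of France.", "Berlin is the capital of Germany."],
    "embeddings": [np.random.rand(768).tolist(), np.random.rand(768).tolist()],
}
features = Features(
    {"title": Value("string"), "text": Value("string"), "embeddings": Sequence(Value("float32"))}
)
dataset = Dataset.from_dict(data, features=features)

dataset.add_faiss_index(column="embeddings")             # build the faiss index in memory
dataset.get_index("embeddings").save("my_index.faiss")   # serialize the index separately
dataset.drop_index("embeddings")                         # save_to_disk does not persist indexes
dataset.save_to_disk("my_dataset")

index = CustomHFIndex.load_from_disk(vector_size=768, dataset_path="my_dataset", index_path="my_index.faiss")
index.init_index()
```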
class RagRetriever: """ Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel. Args: config ([`RagConfig`]): The configuration of the RAG model this Retriever is used with. Contains parameters indicating which `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical one (default) from the datasets library with `config.index_name="wiki_dpr"` for example. question_encoder_tokenizer ([`PreTrainedTokenizer`]): The tokenizer that was used to tokenize the question. It is used to decode the question and then use the generator_tokenizer. generator_tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for the generator part of the RagModel. index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration): If specified, use this index instead of the one built using the configuration Examples: ```python >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact') >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained( ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed" ... ) >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset = ( ... ... ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset) >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)* >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)* >>> retriever = RagRetriever.from_pretrained( ... "facebook/dpr-ctx_encoder-single-nq-base", ... index_name="custom", ... passages_path=dataset_path, ... index_path=index_path, ... 
) >>> # To load the legacy index built originally for Rag's paper >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy") ```""" def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True): self._init_retrieval = init_retrieval requires_backends(self, ["datasets", "faiss"]) super().__init__() self.index = index or self._build_index(config) self.generator_tokenizer = generator_tokenizer self.question_encoder_tokenizer = question_encoder_tokenizer self.n_docs = config.n_docs self.batch_size = config.retrieval_batch_size self.config = config if self._init_retrieval: self.init_retrieval() self.ctx_encoder_tokenizer = None self.return_tokenized_docs = False @staticmethod def _build_index(config): if config.index_name == "legacy": return LegacyIndex( config.retrieval_vector_size, config.index_path or LEGACY_INDEX_PATH, ) elif config.index_name == "custom": return CustomHFIndex.load_from_disk( vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path, ) else: return CanonicalHFIndex( vector_size=config.retrieval_vector_size, dataset_name=config.dataset, dataset_split=config.dataset_split, index_name=config.index_name, index_path=config.index_path, use_dummy_dataset=config.use_dummy_dataset, dataset_revision=config.dataset_revision, ) @classmethod def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): requires_backends(cls, ["datasets", "faiss"]) config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) question_encoder_tokenizer = rag_tokenizer.question_encoder generator_tokenizer = rag_tokenizer.generator if indexed_dataset is not None: config.index_name = "custom" index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) else: index = cls._build_index(config) return cls( config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, ) def save_pretrained(self, save_directory): if isinstance(self.index, CustomHFIndex): if self.config.index_path is None: index_path = os.path.join(save_directory, "hf_dataset_index.faiss") self.index.dataset.get_index("embeddings").save(index_path) self.config.index_path = index_path if self.config.passages_path is None: passages_path = os.path.join(save_directory, "hf_dataset") # datasets don't support save_to_disk with indexes right now faiss_index = self.index.dataset._indexes.pop("embeddings") self.index.dataset.save_to_disk(passages_path) self.index.dataset._indexes["embeddings"] = faiss_index self.config.passages_path = passages_path self.config.save_pretrained(save_directory) rag_tokenizer = RagTokenizer( question_encoder=self.question_encoder_tokenizer, generator=self.generator_tokenizer, ) rag_tokenizer.save_pretrained(save_directory) def init_retrieval(self): """ Retriever initialization function. It loads the index into memory. """ logger.info("initializing retrieval") self.index.init_index() def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None): r""" Postprocessing retrieved `docs` and combining them with `input_strings`. Args: docs (`dict`): Retrieved documents. input_strings (`str`): Input strings decoded by `preprocess_query`. 
prefix (`str`): Prefix added at the beginning of each input, typically used with T5-based models. Return: `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible `attention_mask`. """ def cat_input_and_doc(doc_title, doc_text, input_string, prefix): # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation # TODO(piktus): better handling of truncation if doc_title.startswith('"'): doc_title = doc_title[1:] if doc_title.endswith('"'): doc_title = doc_title[:-1] if prefix is None: prefix = "" out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace( " ", " " ) return out rag_input_strings = [ cat_input_and_doc( docs[i]["title"][j], docs[i]["text"][j], input_strings[i], prefix, ) for i in range(len(docs)) for j in range(n_docs) ] contextualized_inputs = self.generator_tokenizer.batch_encode_plus( rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding="max_length", truncation=True, ) return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"] def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]: return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)] def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]: question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size) ids_batched = [] vectors_batched = [] for question_hidden_states in question_hidden_states_batched: start_time = time.time() ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs) logger.debug( f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}" ) ids_batched.extend(ids) vectors_batched.extend(vectors) return ( np.array(ids_batched), np.array(vectors_batched), ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d) def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]: """ Retrieves documents for specified `question_hidden_states`. Args: question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): A batch of query vectors to retrieve with. n_docs (`int`): The number of docs retrieved per query. Return: `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects: - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings of the retrieved docs per query. - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index - **doc_dicts** (`List[dict]`): The `retrieved_doc_embeds` examples per query. """ doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer): # used in end2end retriever training self.ctx_encoder_tokenizer = ctx_encoder_tokenizer self.return_tokenized_docs = True def __call__( self, question_input_ids: List[List[int]], question_hidden_states: np.ndarray, prefix=None, n_docs=None, return_tensors=None, ) -> BatchEncoding: """ Retrieves documents for specified `question_hidden_states`. Args: question_input_ids (`List[List[int]]`) batch of input ids question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`: A batch of query vectors to retrieve with. 
prefix (`str`, *optional*): The prefix used by the generator's tokenizer. n_docs (`int`, *optional*): The number of docs retrieved per query. return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **context_input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents - **doc_ids** -- List of ids of the retrieved documents """ n_docs = n_docs if n_docs is not None else self.n_docs prefix = prefix if prefix is not None else self.config.generator.prefix retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs) input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True) context_input_ids, context_attention_mask = self.postprocess_docs( docs, input_strings, prefix, n_docs, return_tensors=return_tensors ) if self.return_tokenized_docs: retrieved_doc_text = [] retrieved_doc_title = [] for b_idx in range(len(docs)): for doc_idx in range(n_docs): retrieved_doc_text.append(docs[b_idx]["text"][doc_idx]) retrieved_doc_title.append(docs[b_idx]["title"][doc_idx]) tokenized_docs = self.ctx_encoder_tokenizer( retrieved_doc_title, retrieved_doc_text, truncation=True, padding="longest", return_tensors=return_tensors, ) return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, "tokenized_doc_ids": tokenized_docs["input_ids"], "tokenized_doc_attention_mask": tokenized_docs["attention_mask"], }, tensor_type=return_tensors, ) else: return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, }, tensor_type=return_tensors, )
class_definition
15,071
29,921
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/retrieval_rag.py
null
6,195
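`postprocess_docs` above builds one generator input per (question, document) pair by concatenating an optional prefix, the document title, the document text and the decoded question with the config separators. The plain-Python sketch below shows that layout; the separator values are assumed `RagConfig` defaults (`title_sep=" / "`, `doc_sep=" // "`), so check your own config before relying on them.

```python
# Illustrative layout of a single generator input; separators are assumed RagConfig defaults.
prefix, title_sep, doc_sep = "", " / ", " // "
doc_title = "Paris"
doc_text = "Paris is the capital of France."
question = "How many people live in Paris?"

generator_input = (prefix + doc_title + title_sep + doc_text + doc_sep + question).replace("  ", " ")
print(generator_input)
# Paris / Paris is the capital of France. // How many people live in Paris?
```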
class RagTokenizer: def __init__(self, question_encoder, generator): self.question_encoder = question_encoder self.generator = generator self.current_tokenizer = self.question_encoder def save_pretrained(self, save_directory): if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer") generator_path = os.path.join(save_directory, "generator_tokenizer") self.question_encoder.save_pretrained(question_encoder_path) self.generator.save_pretrained(generator_path) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer config = kwargs.pop("config", None) if config is None: config = RagConfig.from_pretrained(pretrained_model_name_or_path) question_encoder = AutoTokenizer.from_pretrained( pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer" ) generator = AutoTokenizer.from_pretrained( pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer" ) return cls(question_encoder=question_encoder, generator=generator) def __call__(self, *args, **kwargs): return self.current_tokenizer(*args, **kwargs) def batch_decode(self, *args, **kwargs): return self.generator.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.generator.decode(*args, **kwargs) def _switch_to_input_mode(self): self.current_tokenizer = self.question_encoder def _switch_to_target_mode(self): self.current_tokenizer = self.generator def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding: warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. See the documentation of your specific tokenizer for more " "details", FutureWarning, ) if max_length is None: max_length = self.current_tokenizer.model_max_length model_inputs = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = self.current_tokenizer.model_max_length labels = self( text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) model_inputs["labels"] = labels["input_ids"] return model_inputs
class_definition
889
4,576
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/rag/tokenization_rag.py
null
6,196
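A short, hedged usage sketch of the two-tokenizer wrapper above: `__call__` encodes with the question-encoder tokenizer, `decode`/`batch_decode` always go through the generator tokenizer, and `save_pretrained` writes the two tokenizers to separate subfolders. The checkpoint name and output directory are just examples.

```python
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
inputs = tokenizer("How many people live in Paris?", return_tensors="tf")  # question-encoder side
# after running a RAG model: answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
tokenizer.save_pretrained("./rag_tokenizer")  # -> question_encoder_tokenizer/ and generator_tokenizer/
```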
class MobileViTV2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileViTV2Model`]. It is used to instantiate a MobileViTV2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileViTV2 [apple/mobilevitv2-1.0](https://huggingface.co/apple/mobilevitv2-1.0) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 256): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 2): The size (resolution) of each patch. expand_ratio (`float`, *optional*, defaults to 2.0): Expansion factor for the MobileNetv2 layers. hidden_act (`str` or `function`, *optional*, defaults to `"swish"`): The non-linear activation function (function or string) in the Transformer encoder and convolution layers. conv_kernel_size (`int`, *optional*, defaults to 3): The size of the convolutional kernel in the MobileViTV2 layer. output_stride (`int`, *optional*, defaults to 32): The ratio of the spatial resolution of the output to the resolution of the input image. classifier_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for attached classifiers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. aspp_out_channels (`int`, *optional*, defaults to 512): Number of output channels used in the ASPP layer for semantic segmentation. atrous_rates (`List[int]`, *optional*, defaults to `[6, 12, 18]`): Dilation (atrous) factors used in the ASPP layer for semantic segmentation. aspp_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the ASPP layer for semantic segmentation. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. n_attn_blocks (`List[int]`, *optional*, defaults to `[2, 4, 3]`): The number of attention blocks in each MobileViTV2Layer base_attn_unit_dims (`List[int]`, *optional*, defaults to `[128, 192, 256]`): The base multiplier for dimensions of attention blocks in each MobileViTV2Layer width_multiplier (`float`, *optional*, defaults to 1.0): The width multiplier for MobileViTV2. ffn_multiplier (`int`, *optional*, defaults to 2): The FFN multiplier for MobileViTV2. attn_dropout (`float`, *optional*, defaults to 0.0): The dropout in the attention layer. ffn_dropout (`float`, *optional*, defaults to 0.0): The dropout between FFN layers. 
Example: ```python >>> from transformers import MobileViTV2Config, MobileViTV2Model >>> # Initializing a mobilevitv2-small style configuration >>> configuration = MobileViTV2Config() >>> # Initializing a model from the mobilevitv2-small style configuration >>> model = MobileViTV2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mobilevitv2" def __init__( self, num_channels=3, image_size=256, patch_size=2, expand_ratio=2.0, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-5, aspp_out_channels=512, atrous_rates=[6, 12, 18], aspp_dropout_prob=0.1, semantic_loss_ignore_index=255, n_attn_blocks=[2, 4, 3], base_attn_unit_dims=[128, 192, 256], width_multiplier=1.0, ffn_multiplier=2, attn_dropout=0.0, ffn_dropout=0.0, **kwargs, ): super().__init__(**kwargs) self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.expand_ratio = expand_ratio self.hidden_act = hidden_act self.conv_kernel_size = conv_kernel_size self.output_stride = output_stride self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.n_attn_blocks = n_attn_blocks self.base_attn_unit_dims = base_attn_unit_dims self.width_multiplier = width_multiplier self.ffn_multiplier = ffn_multiplier self.ffn_dropout = ffn_dropout self.attn_dropout = attn_dropout self.classifier_dropout_prob = classifier_dropout_prob # decode head attributes for semantic segmentation self.aspp_out_channels = aspp_out_channels self.atrous_rates = atrous_rates self.aspp_dropout_prob = aspp_dropout_prob self.semantic_loss_ignore_index = semantic_loss_ignore_index
class_definition
914
6,439
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py
null
6,197
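Since the configuration above is a plain container, the variant hyper-parameters documented in its docstring are simply constructor arguments. A hedged sketch of building a narrower, randomly initialized segmentation variant (the specific values are arbitrary examples):

```python
from transformers import MobileViTV2Config, MobileViTV2ForSemanticSegmentation

config = MobileViTV2Config(width_multiplier=0.5, aspp_out_channels=256, atrous_rates=[6, 12, 18])
model = MobileViTV2ForSemanticSegmentation(config)   # random weights, for illustration only
print(config.base_attn_unit_dims, config.width_multiplier)  # [128, 192, 256] 0.5
```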
class MobileViTV2OnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})]) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})]) @property def atol_for_validation(self) -> float: return 1e-4
class_definition
6,442
7,090
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py
null
6,198
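The ONNX config above mainly supplies per-task input/output names, dynamic-axes metadata, and a validation tolerance for an export pipeline. A small sketch of inspecting those properties directly (the import path is the module shown in the record's filepath):

```python
from transformers import MobileViTV2Config
from transformers.models.mobilevitv2.configuration_mobilevitv2 import MobileViTV2OnnxConfig

onnx_config = MobileViTV2OnnxConfig(MobileViTV2Config(), task="image-classification")
print(dict(onnx_config.inputs))        # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
print(dict(onnx_config.outputs))       # {'logits': {0: 'batch'}}
print(onnx_config.atol_for_validation) # 0.0001
```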
class MobileViTV2ConvLayer(nn.Module): def __init__( self, config: MobileViTV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias: bool = False, dilation: int = 1, use_normalization: bool = True, use_activation: Union[bool, str] = True, ) -> None: super().__init__() padding = int((kernel_size - 1) / 2) * dilation if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features
class_definition
2,773
4,865
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
null
6,199
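To make the building block above easier to see in isolation, here is a standalone Conv -> BatchNorm -> activation sketch using the same dilation-aware "same" padding formula; it is not the library module, just an equivalent pattern with toy channel sizes.

```python
import torch
from torch import nn

def same_padding(kernel_size: int, dilation: int = 1) -> int:
    # keeps the spatial size unchanged when stride == 1, as in MobileViTV2ConvLayer
    return int((kernel_size - 1) / 2) * dilation

block = nn.Sequential(
    nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=same_padding(3), bias=False),
    nn.BatchNorm2d(64, eps=1e-5, momentum=0.1),
    nn.SiLU(),  # "swish", the config's default hidden_act
)
x = torch.randn(1, 32, 56, 56)
print(block(x).shape)  # torch.Size([1, 64, 56, 56]) -- spatial resolution preserved
```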