Dataset schema (one row per extracted class definition):
- text: string, lengths 31 to 243k
- type: string, 1 class
- start: int64, 36 to 275k
- end: int64, 286 to 280k
- depth: int64, 0 to 1
- filepath: string, lengths 85 to 188
- parent_class: string, 3 values
- class_index: int64, 0 to 10.8k
class MvpForCausalLM(MvpPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = MvpDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder def set_lightweight_tuning(self): self.model.set_lightweight_tuning() self.lm_head.requires_grad_(False) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns: Example: ```python >>> from transformers import AutoTokenizer, MvpForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForCausalLM.from_pretrained("RUCAIBox/mvp", add_cross_attention=False) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 8, 50267] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
type: class_definition | start: 81,057 | end: 90,257 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mvp/modeling_mvp.py | parent_class: null | class_index: 4,100
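As a complement to the docstring example, the same head can be driven with `labels` to obtain the cross-entropy loss directly; as the code above shows, labels are compared position-for-position against the logits, with `-100` positions ignored. A minimal sketch (checkpoint name and `add_cross_attention=False` come from the docstring; the prompt is illustrative):

```python
from transformers import AutoTokenizer, MvpForCausalLM

tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
model = MvpForCausalLM.from_pretrained("RUCAIBox/mvp", add_cross_attention=False)

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
# Use the input ids themselves as labels; positions set to -100 would be ignored.
outputs = model(**inputs, labels=inputs["input_ids"])
print(outputs.loss)          # scalar cross-entropy over the sequence
print(outputs.logits.shape)  # torch.Size([1, 8, 50267])
```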
class MvpTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not: ```python >>> from transformers import MvpTokenizerFast >>> tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows the leading word to be treated just like any other word. (The MVP tokenizer detects the beginning of words by the preceding space.)
trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post-processing step should trim offsets to avoid including whitespaces. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = MvpTokenizer def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token # Mask token behaves like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, ) # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` tokenizer_component = "post_processor" tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) if tokenizer_component_instance: state = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class` if "sep" in state: state["sep"] = tuple(state["sep"]) if "cls" in state: state["cls"] = tuple(state["cls"]) changes_to_apply = False if state.get("add_prefix_space", add_prefix_space) != add_prefix_space: state["add_prefix_space"] = add_prefix_space changes_to_apply = True if state.get("trim_offsets", trim_offsets) != trim_offsets: state["trim_offsets"] = trim_offsets changes_to_apply = True if changes_to_apply: component_class = getattr(processors, state.pop("type")) new_value = component_class(**state) setattr(self.backend_tokenizer, tokenizer_component, new_value) @property def mask_token(self) -> str: """ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. MVP tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. """ if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet.") return None return str(self._mask_token) @mask_token.setter def mask_token(self, value): """ Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on Mvp. """ # Mask token behaves like a normal word, i.e.
include the space before it # So we set lstrip to True value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value self._mask_token = value def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
type: class_definition | start: 1,161 | end: 11,798 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mvp/tokenization_mvp_fast.py | parent_class: null | class_index: 4,101
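The `_batch_encode_plus`/`_encode_plus` overrides above are why pretokenized input fails without a prefix space. A minimal sketch of the working call (the words are illustrative):

```python
from transformers import MvpTokenizerFast

# Pretokenized input requires add_prefix_space=True, as enforced by the
# ValueError in _batch_encode_plus / _encode_plus above.
tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp", add_prefix_space=True)
encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding["input_ids"])
```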
class MvpConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MvpModel`]. It is used to instantiate a MVP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MVP [RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50267): Vocabulary size of the MVP model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`MvpModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by dividing by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. use_prompt (`bool`, *optional*, defaults to `False`): Whether or not to use prompt-based tuning.
prompt_length (`int`, *optional*, defaults to 100): The length of the prompt. prompt_mid_dim (`int`, *optional*, defaults to 800): Dimensionality of the "intermediate" layer of the prompt. Example: ```python >>> from transformers import MvpConfig, MvpModel >>> # Initializing a MVP RUCAIBox/mvp style configuration >>> configuration = MvpConfig() >>> # Initializing a model (with random weights) from the RUCAIBox/mvp style configuration >>> model = MvpModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mvp" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.use_prompt = use_prompt self.prompt_length = prompt_length self.prompt_mid_dim = prompt_mid_dim super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, ) if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False): self.forced_bos_token_id = self.bos_token_id warnings.warn( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " "The config can simply be saved and uploaded again to be fixed." )
type: class_definition | start: 821 | end: 8,408 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mvp/configuration_mvp.py | parent_class: null | class_index: 4,102
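Since `use_prompt`, `prompt_length`, and `prompt_mid_dim` are the knobs behind the prompt-based lightweight tuning mentioned above, a small randomly initialized model with prompts enabled can be built directly from the config. A sketch; all hyperparameter values below are illustrative, not those of the released checkpoint:

```python
from transformers import MvpConfig, MvpModel

config = MvpConfig(
    d_model=128,
    encoder_layers=2,
    decoder_layers=2,
    encoder_ffn_dim=256,
    decoder_ffn_dim=256,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    use_prompt=True,    # adds prompt parameters of length `prompt_length`
    prompt_length=16,
    prompt_mid_dim=64,
)
model = MvpModel(config)  # randomly initialized
print(sum(p.numel() for p in model.parameters()))
```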
class MvpTokenizer(PreTrainedTokenizer): """ Constructs an MVP tokenizer, which is similar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not: ```python >>> from transformers import MvpTokenizer >>> tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp") >>> tokenizer("Hello world")["input_ids"] [0, 31414, 232, 2] >>> tokenizer(" Hello world")["input_ids"] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows the leading word to be treated just like any other word. (The MVP tokenizer detects the beginning of words by the preceding space.)
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ): bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") super().__init__( errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, ) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): vocab = self.encoder.copy() vocab.update(self.added_tokens_encoder) return vocab def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def 
_convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A MVP sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs)
type: class_definition | start: 2,330 | end: 16,191 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mvp/tokenization_mvp.py | parent_class: null | class_index: 4,103
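The `build_inputs_with_special_tokens` method above produces the `<s> A </s></s> B </s>` layout for sequence pairs, which can be checked directly (checkpoint name from the docstring; input strings are illustrative):

```python
from transformers import MvpTokenizer

tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")

ids_a = tokenizer("first sequence", add_special_tokens=False)["input_ids"]
ids_b = tokenizer("second sequence", add_special_tokens=False)["input_ids"]

pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
print(tokenizer.convert_ids_to_tokens(pair))  # <s> ... </s> </s> ... </s>
print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))  # all zeros
```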
class PvtV2Config(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PvtV2Model`]. It is used to instantiate a Pvt V2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pvt V2 B0 [OpenGVLab/pvt_v2_b0](https://huggingface.co/OpenGVLab/pvt_v2_b0) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`Union[int, Tuple[int, int]]`, *optional*, defaults to 224): The input image size. Pass int value for square image, or tuple of (height, width). num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_encoder_blocks (`int`, *optional*, defaults to 4): The number of encoder blocks (i.e. stages in the Mix Transformer encoder). depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`): The number of layers in each encoder block. sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`): Spatial reduction ratios in each encoder block. hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`): Dimension of each of the encoder blocks. patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`): Patch size for overlapping patch embedding before each encoder block. strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`): Stride for overlapping patch embedding before each encoder block. num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`): Number of attention heads for each attention layer in each block of the Transformer encoder. mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`): Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the encoder blocks. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout probability for stochastic depth, used in the blocks of the Transformer encoder. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. linear_attention (`bool`, *optional*, defaults to `False`): Whether to use linear-complexity attention. If set to `True`, `sr_ratio` is ignored and average pooling is used for dimensionality reduction in the attention layers rather than strided convolution. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages.
If unset and `out_indices` is unset, will default to the last stage. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Example: ```python >>> from transformers import PvtV2Model, PvtV2Config >>> # Initializing a pvt_v2_b0 style configuration >>> configuration = PvtV2Config() >>> # Initializing a model from the OpenGVLab/pvt_v2_b0 style configuration >>> model = PvtV2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "pvt_v2" def __init__( self, image_size: Union[int, Tuple[int, int]] = 224, num_channels: int = 3, num_encoder_blocks: int = 4, depths: List[int] = [2, 2, 2, 2], sr_ratios: List[int] = [8, 4, 2, 1], hidden_sizes: List[int] = [32, 64, 160, 256], patch_sizes: List[int] = [7, 3, 3, 3], strides: List[int] = [4, 2, 2, 2], num_attention_heads: List[int] = [1, 2, 5, 8], mlp_ratios: List[int] = [8, 8, 4, 4], hidden_act: Union[str, Callable] = "gelu", hidden_dropout_prob: float = 0.0, attention_probs_dropout_prob: float = 0.0, initializer_range: float = 0.02, drop_path_rate: float = 0.0, layer_norm_eps: float = 1e-6, qkv_bias: bool = True, linear_attention: bool = False, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) image_size = (image_size, image_size) if isinstance(image_size, int) else image_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.depths = depths self.sr_ratios = sr_ratios self.hidden_sizes = hidden_sizes self.patch_sizes = patch_sizes self.strides = strides self.mlp_ratios = mlp_ratios self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.drop_path_rate = drop_path_rate self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.linear_attention = linear_attention self.stage_names = [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
type: class_definition | start: 1,051 | end: 7,962 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/configuration_pvt_v2.py | parent_class: null | class_index: 4,104
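The `out_features`/`out_indices` pair is reconciled by `get_aligned_output_features_output_indices`, as the `__init__` above shows. A quick sketch of the alignment, with defaults otherwise:

```python
from transformers import PvtV2Config

config = PvtV2Config(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # [1, 3], the positions of those stages in stage_names
```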
class PvtV2DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob)
type: class_definition | start: 3,016 | end: 3,495 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,105
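`PvtV2DropPath` delegates to a `drop_path` helper defined elsewhere in the same file. For reference, a standard stochastic-depth implementation matching that call signature; a sketch, not the file's verbatim code:

```python
import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Stochastic depth: zero an entire residual branch per sample, rescaling
    the survivors so the expected value of the output is unchanged."""
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dimensions.
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize to {0, 1}
    return hidden_states.div(keep_prob) * random_tensor
```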
class PvtV2OverlapPatchEmbeddings(nn.Module): """Image to Patch Embedding""" def __init__(self, config: PvtV2Config, layer_idx: int): super().__init__() patch_size = config.patch_sizes[layer_idx] patch_size = (patch_size, patch_size) if isinstance(patch_size, int) else patch_size stride = config.strides[layer_idx] num_channels = config.num_channels if layer_idx == 0 else config.hidden_sizes[layer_idx - 1] hidden_size = config.hidden_sizes[layer_idx] self.patch_size = patch_size self.proj = nn.Conv2d( num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=(patch_size[0] // 2, patch_size[1] // 2), ) self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) def forward(self, pixel_values): embeddings = self.proj(pixel_values) _, _, height, width = embeddings.shape embeddings = embeddings.flatten(2).transpose(1, 2) embeddings = self.layer_norm(embeddings) return embeddings, height, width
type: class_definition | start: 3,498 | end: 4,620 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,106
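Because the convolution pads by half the kernel size, the output grid per side is `floor((H + 2*(k // 2) - k) / stride) + 1`. A shape check on the defaults, importing the class from the internal module path given above (a sketch):

```python
import torch
from transformers import PvtV2Config
from transformers.models.pvt_v2.modeling_pvt_v2 import PvtV2OverlapPatchEmbeddings

config = PvtV2Config()
embed = PvtV2OverlapPatchEmbeddings(config, layer_idx=0)  # 7x7 conv, stride 4, padding 3

pixel_values = torch.randn(1, 3, 224, 224)
embeddings, height, width = embed(pixel_values)
print(height, width)     # 56 56, i.e. floor((224 + 6 - 7) / 4) + 1
print(embeddings.shape)  # torch.Size([1, 3136, 32])
```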
class PvtV2DepthWiseConv(nn.Module): """ Depth-wise (DW) convolution to infuse positional information using zero-padding. Depth-wise convolutions use a number of groups equal to the number of input channels, meaning one filter per input channel. This reduces the overall parameter count and compute cost, since the key purpose of this layer is position encoding. """ def __init__(self, config: PvtV2Config, dim: int = 768): super().__init__() self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) def forward(self, hidden_states, height, width): batch_size, seq_len, num_channels = hidden_states.shape hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width) hidden_states = self.dwconv(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) return hidden_states
type: class_definition | start: 4,623 | end: 5,533 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,107
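The parameter savings from `groups=dim` are easy to quantify: each output channel sees only its own input channel, so the weight tensor shrinks by a factor of `dim`. A quick comparison at the default `dim=768`:

```python
import torch.nn as nn

def num_params(module: nn.Module) -> int:
    return sum(p.numel() for p in module.parameters())

dim = 768
depthwise = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
dense = nn.Conv2d(dim, dim, 3, 1, 1, bias=True)

print(num_params(depthwise))  # 768*9 + 768     = 7,680
print(num_params(dense))      # 768*768*9 + 768 = 5,309,184
```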
class PvtV2SelfAttention(nn.Module): """Efficient self-attention mechanism.""" def __init__(self, config: PvtV2Config, hidden_size: int, num_attention_heads: int, spatial_reduction_ratio: int): super().__init__() self.linear_attention = config.linear_attention self.pruned_heads = set() self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError( f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " f"heads ({self.num_attention_heads})" ) self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.attn_drop = nn.Dropout(config.attention_probs_dropout_prob) self.proj = nn.Linear(self.hidden_size, self.hidden_size) self.proj_drop = nn.Dropout(config.hidden_dropout_prob) self.spatial_reduction_ratio = spatial_reduction_ratio if self.linear_attention: self.pool = nn.AdaptiveAvgPool2d(7) self.spatial_reduction = nn.Conv2d(self.hidden_size, self.hidden_size, kernel_size=1, stride=1) self.layer_norm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps) self.act = nn.GELU() elif spatial_reduction_ratio > 1: self.spatial_reduction = nn.Conv2d( self.hidden_size, self.hidden_size, kernel_size=spatial_reduction_ratio, stride=spatial_reduction_ratio ) self.layer_norm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps) def transpose_for_scores(self, hidden_states) -> torch.Tensor: new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size) hidden_states = hidden_states.view(new_shape) return hidden_states.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False, ) -> Tuple[torch.Tensor]: batch_size, seq_len, num_channels = hidden_states.shape query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.linear_attention: hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) hidden_states = ( self.spatial_reduction(self.pool(hidden_states)).reshape(batch_size, num_channels, -1).permute(0, 2, 1) ) hidden_states = self.act(self.layer_norm(hidden_states)) elif self.spatial_reduction_ratio > 1: hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) hidden_states = ( self.spatial_reduction(hidden_states).reshape(batch_size, num_channels, -1).permute(0, 2, 1) ) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.attn_drop(attention_probs) context_layer = (attention_probs @ value_layer).transpose(1, 2).reshape(batch_size, seq_len, num_channels) context_layer = self.proj(context_layer) context_layer = self.proj_drop(context_layer) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads ) # Prune linear layers self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.proj = prune_linear_layer(self.proj, index, dim=1) # Update hyper params and store pruned heads self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = self.attention_head_size * self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads)
type: class_definition | start: 5,536 | end: 10,740 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,108
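The point of the spatial reduction above is that queries keep the full sequence length `N = height * width`, while keys and values are computed on a map downsampled by `spatial_reduction_ratio` per side (or adaptively pooled to a fixed 7x7 grid in the linear-attention branch), so the score matrix shrinks by roughly `R**2`. A back-of-the-envelope check for a stage-1 feature map:

```python
# Score-matrix entries per head for one 56x56 feature map.
height = width = 56
num_queries = height * width                              # 3136

full = num_queries * num_queries                          # no reduction: 9,834,496
reduced = num_queries * ((height // 8) * (width // 8))    # sr_ratio 8: 153,664
linear = num_queries * (7 * 7)                            # pooled to 7x7: 153,664

print(full, reduced, linear)  # the last two coincide here because 56 // 8 == 7
```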
class PvtV2ConvFeedForwardNetwork(nn.Module): def __init__( self, config: PvtV2Config, in_features: int, hidden_features: Optional[int] = None, out_features: Optional[int] = None, ): super().__init__() out_features = out_features if out_features is not None else in_features self.dense1 = nn.Linear(in_features, hidden_features) self.dwconv = PvtV2DepthWiseConv(config, hidden_features) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(hidden_features, out_features) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.relu = nn.ReLU() if config.linear_attention else nn.Identity() def forward(self, hidden_states: torch.Tensor, height, width) -> torch.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.relu(hidden_states) hidden_states = self.dwconv(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
type: class_definition | start: 10,743 | end: 12,091 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,109
class PvtV2BlockLayer(nn.Module): def __init__(self, config: PvtV2Config, layer_idx: int, drop_path: float = 0.0): super().__init__() hidden_size: int = config.hidden_sizes[layer_idx] num_attention_heads: int = config.num_attention_heads[layer_idx] spatial_reduction_ratio: int = config.sr_ratios[layer_idx] mlp_ratio: float = config.mlp_ratios[layer_idx] self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.attention = PvtV2SelfAttention( config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, spatial_reduction_ratio=spatial_reduction_ratio, ) self.drop_path = PvtV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = PvtV2ConvFeedForwardNetwork(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False): self_attention_outputs = self.attention( hidden_states=self.layer_norm_1(hidden_states), height=height, width=width, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) mlp_output = self.drop_path(mlp_output) layer_output = hidden_states + mlp_output outputs = (layer_output,) + outputs return outputs
type: class_definition | start: 12,094 | end: 13,946 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,110
class PvtV2EncoderLayer(nn.Module): def __init__(self, config: PvtV2Config, layer_idx: int): super().__init__() self.patch_embedding = PvtV2OverlapPatchEmbeddings( config=config, layer_idx=layer_idx, ) # Transformer block # stochastic depth decay rule drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths)).tolist() block_layers = [] for block_idx in range(config.depths[layer_idx]): block_layers.append( PvtV2BlockLayer( config=config, layer_idx=layer_idx, drop_path=drop_path_decays[sum(config.depths[:layer_idx]) + block_idx], ) ) self.blocks = nn.ModuleList(block_layers) # Layer norm self.layer_norm = nn.LayerNorm(config.hidden_sizes[layer_idx], eps=config.layer_norm_eps) def forward(self, hidden_states, output_attentions): all_self_attentions = () if output_attentions else None # first, obtain patch embeddings hidden_states, height, width = self.patch_embedding(hidden_states) # second, send embeddings through blocks for block in self.blocks: layer_outputs = block(hidden_states, height, width, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions += (layer_outputs[1],) # third, apply layer norm hidden_states = self.layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (all_self_attentions,) return outputs, height, width
type: class_definition | start: 13,949 | end: 15,668 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,111
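The stochastic-depth decay rule in `PvtV2EncoderLayer` draws one drop probability per block across the whole network, increasing linearly with depth, and each stage slices out its contiguous chunk via `sum(config.depths[:layer_idx]) + block_idx`. A sketch with an illustrative nonzero rate (the config default is 0.0):

```python
import torch

depths = [2, 2, 2, 2]  # PvtV2Config default
drop_path_rate = 0.1   # illustrative; the default is 0.0

decays = torch.linspace(0, drop_path_rate, sum(depths)).tolist()
for stage_idx, depth in enumerate(depths):
    start = sum(depths[:stage_idx])
    print(f"stage {stage_idx + 1}: {decays[start:start + depth]}")
```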
class PvtV2Encoder(nn.Module): def __init__(self, config: PvtV2Config): super().__init__() self.config = config self.gradient_checkpointing = False # encoder layers self.layers = nn.ModuleList([PvtV2EncoderLayer(config, i) for i in range(config.num_encoder_blocks)]) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] hidden_states = pixel_values for idx, layer in enumerate(self.layers): if self.gradient_checkpointing and self.training: layer_output = self._gradient_checkpointing_func(layer.__call__, hidden_states, output_attentions) else: layer_output = layer(hidden_states, output_attentions) outputs, height, width = layer_output hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) # reshape back to (batch_size, num_channels, height, width) hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, )
type: class_definition | start: 15,671 | end: 17,577 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,112
class PvtV2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = PvtV2Config base_model_prefix = "pvt_v2" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, nn.Linear): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_()
type: class_definition | start: 17,580 | end: 18,882 | depth: 0 | filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py | parent_class: null | class_index: 4,113
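The conv initialization in `_init_weights` is the He/fan-out scheme: variance `2 / fan_out`, where `fan_out` counts kernel positions times output channels per group. A small check of the numbers it produces (layer shape is illustrative):

```python
import math
import torch.nn as nn

conv = nn.Conv2d(32, 64, kernel_size=3)
fan_out = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels // conv.groups
std = math.sqrt(2.0 / fan_out)
print(fan_out, round(std, 4))  # 576 0.0589
```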
class PvtV2Model(PvtV2PreTrainedModel): def __init__(self, config: PvtV2Config): super().__init__(config) self.config = config # hierarchical Transformer encoder self.encoder = PvtV2Encoder(config) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(PVT_V2_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
class_definition
20,465
22,643
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py
null
4,114
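A quick usage sketch, assuming a transformers version that ships PVTv2; the printed shape assumes the default (b0-style) `hidden_sizes`, so treat the numbers as indicative rather than guaranteed.

```python
import torch
from transformers import PvtV2Config, PvtV2Model

model = PvtV2Model(PvtV2Config())  # randomly initialized, default config
model.eval()
with torch.no_grad():
    outputs = model(torch.randn(1, 3, 224, 224))
print(outputs.last_hidden_state.shape)  # e.g. torch.Size([1, 256, 7, 7]) with default hidden_sizes
```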
class PvtV2ForImageClassification(PvtV2PreTrainedModel): def __init__(self, config: PvtV2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.pvt_v2 = PvtV2Model(config) # Classifier head self.classifier = ( nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PVT_V2_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)")) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor], labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.pvt_v2( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # convert last hidden states to (batch_size, height*width, hidden_size) batch_size = sequence_output.shape[0] # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) sequence_output = sequence_output.permute(0, 2, 3, 1) sequence_output = sequence_output.reshape(batch_size, -1, self.config.hidden_sizes[-1]) # global average pooling sequence_output = sequence_output.mean(dim=1) logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
22,878
26,773
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py
null
4,115
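The classification head above performs global average pooling by flattening the spatial grid into a token axis and averaging. A self-contained sketch of just that step, with made-up shapes:

```python
import torch

x = torch.randn(2, 256, 7, 7)   # (batch, channels, height, width) from the last stage
x = x.permute(0, 2, 3, 1)       # (batch, height, width, channels)
x = x.reshape(2, -1, 256)       # (batch, height*width, channels)
pooled = x.mean(dim=1)          # (batch, channels), fed to the linear classifier
print(pooled.shape)             # torch.Size([2, 256])
```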
class PvtV2Backbone(PvtV2Model, BackboneMixin): def __init__(self, config: PvtV2Config): super().__init__(config) super()._init_backbone(config) self.num_features = config.hidden_sizes @add_start_docstrings_to_model_forward(PVT_V2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") >>> model = AutoBackbone.from_pretrained( ... "OpenGVLab/pvt_v2_b0", out_features=["stage1", "stage2", "stage3", "stage4"] ... ) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 256, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.encoder( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, ) hidden_states = outputs.hidden_states feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
class_definition
26,919
29,416
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py
null
4,116
class DonutSwinConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Donut [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 96): Dimensionality of patch embedding. depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`): Depth of each layer in the Transformer encoder. num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`): Number of attention heads in each layer of the Transformer encoder. window_size (`int`, *optional*, defaults to 7): Size of windows. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. use_absolute_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to add absolute position embeddings to the patch embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. 
Example: ```python >>> from transformers import DonutSwinConfig, DonutSwinModel >>> # Initializing a Donut naver-clova-ix/donut-base style configuration >>> configuration = DonutSwinConfig() >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration >>> model = DonutSwinModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "donut-swin" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
class_definition
799
5,752
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/configuration_donut_swin.py
null
4,117
class DonutSwinEncoderOutput(ModelOutput): """ DonutSwin encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
class_definition
1,701
3,676
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,118
class DonutSwinModelOutput(ModelOutput): """ DonutSwin model's outputs that also contain a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
class_definition
3,780
6,019
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,119
class DonutSwinEmbeddings(nn.Module): """ Construct the patch and position embeddings. Optionally, also the mask token. """ def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = DonutSwinPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None if config.use_absolute_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) else: self.position_embeddings = None self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows interpolating the pre-trained position encodings so the model can be used on higher-resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward( self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> Tuple[torch.Tensor]: _, num_channels, height, width = pixel_values.shape embeddings, output_dimensions = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) batch_size, seq_len, _ = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask if self.position_embeddings is not None: if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings, output_dimensions
class_definition
7,117
11,036
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,120
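The core of `interpolate_pos_encoding` is a bicubic resize of the position grid. Here is the same manipulation on made-up tensors (a 56x56 grid stretched to 64x64), independent of the model:

```python
import torch
import torch.nn as nn

dim = 96
pos = torch.randn(1, 1 + 56 * 56, dim)                       # leading token + 56x56 grid
patch_pos = pos[:, 1:].reshape(1, 56, 56, dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(
    patch_pos, size=(64, 64), mode="bicubic", align_corners=False
)
patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, -1, dim)
new_pos = torch.cat((pos[:, :1], patch_pos), dim=1)          # (1, 1 + 64*64, 96)
```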
class DonutSwinPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: _, num_channels, height, width = pixel_values.shape # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings, output_dimensions
class_definition
11,133
13,318
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,121
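Patch embedding is a strided convolution followed by a flatten; with the default `patch_size=4` and `embed_dim=96`, a 224x224 image yields a 56x56 grid, i.e. 3136 tokens. A standalone sketch:

```python
import torch
import torch.nn as nn

projection = nn.Conv2d(3, 96, kernel_size=4, stride=4)  # patch_size=4, embed_dim=96
pixel_values = torch.randn(1, 3, 224, 224)
embeddings = projection(pixel_values)                    # (1, 96, 56, 56)
embeddings = embeddings.flatten(2).transpose(1, 2)       # (1, 3136, 96)
```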
class DonutSwinPatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`Tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) # pad input so height and width are divisible by 2, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # batch_size height/2 width/2 4*num_channels input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature
class_definition
13,391
15,674
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,122
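Patch merging concatenates each 2x2 neighborhood along the channel axis (halving both spatial dims, quadrupling channels) before the `Linear(4*dim, 2*dim)` reduction. A toy-sized sketch of the slicing:

```python
import torch

batch_size, height, width, channels = 1, 8, 8, 96
x = torch.randn(batch_size, height, width, channels)
merged = torch.cat(
    [x[:, 0::2, 0::2, :], x[:, 1::2, 0::2, :], x[:, 0::2, 1::2, :], x[:, 1::2, 1::2, :]],
    dim=-1,
)                                                    # (1, 4, 4, 384)
merged = merged.view(batch_size, -1, 4 * channels)   # (1, 16, 384), then Linear(384, 192)
```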
class DonutSwinDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob)
class_definition
16,898
17,381
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,123
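`DonutSwinDropPath` delegates to a module-level `drop_path` function that is not part of this excerpt; the conventional stochastic-depth implementation it wraps looks roughly like the sketch below (one Bernoulli mask per sample, rescaled so the expectation is unchanged):

```python
import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Conventional stochastic-depth sketch of the helper this module wraps.
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1 - drop_prob
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)  # one mask per sample
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize to 0/1
    return hidden_states.div(keep_prob) * random_tensor
```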
class DonutSwinSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) ) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] relative_position_bias = relative_position_bias.view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in DonutSwinModel forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
class_definition
17,476
22,353
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,124
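The relative-position bookkeeping in `__init__` above is easiest to see on a tiny window; for a 2x2 window the index tensor below selects rows from a (2*2-1)*(2*2-1) = 9-row bias table. This sketch uses `torch.meshgrid` directly (torch >= 1.10 for the `indexing` argument):

```python
import torch

window_size = (2, 2)
coords = torch.stack(torch.meshgrid(torch.arange(2), torch.arange(2), indexing="ij"))
coords_flatten = torch.flatten(coords, 1)                            # (2, 4)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()      # (4, 4, 2)
relative_coords[:, :, 0] += window_size[0] - 1                       # shift to start at 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = relative_coords.sum(-1)                    # (4, 4), values in [0, 8]
print(relative_position_index)
```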
class DonutSwinSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class_definition
22,424
22,866
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,125
class DonutSwinAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() self.self = DonutSwinSelfAttention(config, dim, num_heads, window_size) self.output = DonutSwinSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs
class_definition
22,957
24,650
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,126
class DonutSwinIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class_definition
24,723
25,286
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,127
class DonutSwinOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class_definition
25,353
25,777
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,128
class DonutSwinLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size) self.drop_path = DonutSwinDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = DonutSwinIntermediate(config, dim) self.output = DonutSwinOutput(config, dim) def set_shift_and_window_size(self, input_resolution): if min(input_resolution) <= self.window_size: # if window size is larger than input resolution, we don't partition windows self.shift_size = torch_int(0) self.window_size = ( torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution) ) def get_attn_mask(self, height, width, dtype, device): if self.shift_size > 0: # calculate attention mask for SW-MSA img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, always_partition: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: if not always_partition: self.set_shift_and_window_size(input_dimensions) else: pass height, width = input_dimensions batch_size, _, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) # pad hidden_states to multiples of window size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask( height_pad, width_pad, dtype=hidden_states.dtype, 
device=hidden_states_windows.device ) attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions ) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) # reverse cyclic shift if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs
class_definition
25,864
31,557
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,129
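The cyclic shift in `DonutSwinLayer.forward` is just `torch.roll` applied before window partitioning and undone afterwards. A toy check that the two rolls cancel:

```python
import torch

x = torch.arange(16.0).view(1, 4, 4, 1)   # (batch, height, width, channels)
shift = 1                                  # plays the role of shift_size = window_size // 2
shifted = torch.roll(x, shifts=(-shift, -shift), dims=(1, 2))
restored = torch.roll(shifted, shifts=(shift, shift), dims=(1, 2))
assert torch.equal(x, restored)
```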
class DonutSwinStage(nn.Module): def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): super().__init__() self.config = config self.dim = dim self.blocks = nn.ModuleList( [ DonutSwinLayer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if (i % 2 == 0) else config.window_size // 2, ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, always_partition: Optional[bool] = False, ) -> Tuple[torch.Tensor]: height, width = input_dimensions for i, layer_module in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs
class_definition
31,644
33,868
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,130
class DonutSwinEncoder(nn.Module): def __init__(self, config, grid_size): super().__init__() self.num_layers = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.layers = nn.ModuleList( [ DonutSwinStage( config=config, dim=int(config.embed_dim * 2**i_layer), input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=DonutSwinPatchMerging if (i_layer < self.num_layers - 1) else None, ) for i_layer in range(self.num_layers) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, always_partition: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, DonutSwinEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition, ) else: layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape # rearrange b (h w) c -> b c h w # here we use the original (not downsampled) height and width reshaped_hidden_state = hidden_states_before_downsampling.view( batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size ) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return DonutSwinEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, 
attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, )
class_definition
33,957
38,719
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,131
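The encoder linearly ramps the stochastic-depth rate across all blocks and hands each stage its slice; with the default `depths=[2, 2, 6, 2]` and `drop_path_rate=0.1`, the split looks like this:

```python
import torch

depths = [2, 2, 6, 2]
drop_path_rate = 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
per_stage = [dpr[sum(depths[:i]) : sum(depths[: i + 1])] for i in range(len(depths))]
# per_stage[0] holds the two smallest rates, per_stage[-1] the two largest (up to 0.1)
```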
class DonutSwinPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DonutSwinConfig base_model_prefix = "swin" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["DonutSwinStage"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
38,816
39,782
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,132
class DonutSwinModel(DonutSwinPreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): super().__init__(config) self.config = config self.num_layers = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token) self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=DonutSwinModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, DonutSwinModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, len(self.config.depths)) embedding_output, input_dimensions = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return DonutSwinModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, )
class_definition
41,845
45,891
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/modeling_donut_swin.py
null
4,133
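A usage sketch with a randomly initialized model; with the default config (224px images, patch size 4, three merging steps) the expected shapes are (1, 49, 768) tokens and a (1, 768) pooled vector, though treat the numbers as indicative:

```python
import torch
from transformers import DonutSwinConfig, DonutSwinModel

model = DonutSwinModel(DonutSwinConfig())
model.eval()
with torch.no_grad():
    outputs = model(torch.randn(1, 3, 224, 224))
print(outputs.last_hidden_state.shape)  # e.g. torch.Size([1, 49, 768])
print(outputs.pooler_output.shape)      # e.g. torch.Size([1, 768])
```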
class DonutFeatureExtractor(DonutImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DonutImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
class_definition
812
1,178
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/feature_extraction_donut.py
null
4,134
class DonutProcessorKwargs(ProcessingKwargs, total=False): _defaults = {}
class_definition
957
1,034
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/processing_donut.py
null
4,135
class DonutProcessor(ProcessorMixin): r""" Constructs a Donut processor which wraps a Donut image processor and an XLMRoBERTa tokenizer into a single processor. [`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and [`~DonutProcessor.decode`] for more information. Args: image_processor ([`DonutImageProcessor`], *optional*): An instance of [`DonutImageProcessor`]. The image processor is a required input. tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*): An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self._in_target_context_manager = False def __call__( self, images: ImageInput = None, text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[DonutProcessorKwargs], ): """ When used in normal mode, this method forwards all its arguments to AutoImageProcessor's [`~AutoImageProcessor.__call__`] and returns its output. If used in the context [`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's [`~DonutTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ # For backward compatibility legacy = kwargs.pop("legacy", True) if legacy: # With `add_special_tokens=True`, the performance of Donut is degraded when working with both images and text. logger.warning_once( "Legacy behavior is being used. The current behavior will be deprecated in version 5.0.0. " "In the new behavior, if both images and text are provided, the default value of `add_special_tokens` " "will be changed to `False` when calling the tokenizer if `add_special_tokens` is unset. " "To test the new behavior, set `legacy=False` as a processor call argument."
) if self._in_target_context_manager: return self.current_processor(images, text, **kwargs) if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") output_kwargs = self._merge_kwargs( DonutProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if images is not None: inputs = self.image_processor(images, **output_kwargs["images_kwargs"]) if text is not None: if not legacy and images is not None: output_kwargs["text_kwargs"].setdefault("add_special_tokens", False) encodings = self.tokenizer(text, **output_kwargs["text_kwargs"]) if text is None: return inputs elif images is None: return encodings else: inputs["labels"] = encodings["input_ids"] # for BC inputs["input_ids"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Donut. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your image inputs, or in a separate call)." ) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.image_processor self._in_target_context_manager = False def token2json(self, tokens, is_inner_value=False, added_vocab=None): """ Convert a (generated) token sequence into an ordered JSON format.
""" if added_vocab is None: added_vocab = self.tokenizer.get_added_vocab() output = {} while tokens: start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE) if start_token is None: break key = start_token.group(1) key_escaped = re.escape(key) end_token = re.search(rf"</s_{key_escaped}>", tokens, re.IGNORECASE) start_token = start_token.group() if end_token is None: tokens = tokens.replace(start_token, "") else: end_token = end_token.group() start_token_escaped = re.escape(start_token) end_token_escaped = re.escape(end_token) content = re.search( f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE | re.DOTALL ) if content is not None: content = content.group(1).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab) if value: if len(value) == 1: value = value[0] output[key] = value else: # leaf nodes output[key] = [] for leaf in content.split(r"<sep/>"): leaf = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": leaf = leaf[1:-2] # for categorical special tokens output[key].append(leaf) if len(output[key]) == 1: output[key] = output[key][0] tokens = tokens[tokens.find(end_token) + len(end_token) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab) if len(output): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
class_definition
1,077
9,663
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/donut/processing_donut.py
null
4,136
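Tracing `token2json` on a small hand-written sequence; the tag names below are invented for illustration, and the `from_pretrained` call downloads the naver-clova-ix/donut-base checkpoint to resolve added tokens:

```python
from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
sequence = "<s_menu><s_name>Latte</s_name><s_price>5</s_price></s_menu>"
print(processor.token2json(sequence))
# expected: {'menu': {'name': 'Latte', 'price': '5'}}
```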
class DonutImageProcessor(BaseImageProcessor): r""" Constructs a Donut image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]`, *optional*, defaults to `{"height": 2560, "width": 1920}`): Size of the image after resizing. The shortest edge of the image is resized to min(size["height"], size["width"]), with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_thumbnail (`bool`, *optional*, defaults to `True`): Whether to resize the image using the thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `False`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `random_padding` is set to `True` in `preprocess`, each image is padded with a random amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are padded to the largest image size in the batch. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Image standard deviation.
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_thumbnail: bool = True, do_align_long_axis: bool = False, do_pad: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 2560, "width": 1920} if isinstance(size, (tuple, list)): # The previous feature extractor size parameter was in (width, height) format size = size[::-1] size = get_size_dict(size) self.do_resize = do_resize self.size = size self.resample = resample self.do_thumbnail = do_thumbnail self.do_align_long_axis = do_align_long_axis self.do_pad = do_pad self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def align_long_axis( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Align the long axis of the image to the longest axis of the specified size. Args: image (`np.ndarray`): The image to be aligned. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to align the long axis to. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `np.ndarray`: The aligned image. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = size["height"], size["width"] if (output_width < output_height and input_width > input_height) or ( output_width > output_height and input_width < input_height ): image = np.rot90(image, 3) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def pad_image( self, image: np.ndarray, size: Dict[str, int], random_padding: bool = False, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad the image to the specified size. Args: image (`np.ndarray`): The image to be padded. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to pad the image to. random_padding (`bool`, *optional*, defaults to `False`): Whether to use random padding or not. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" output_height, output_width = size["height"], size["width"] input_height, input_width = get_image_size(image, channel_dim=input_data_format) delta_width = output_width - input_width delta_height = output_height - input_height if random_padding: pad_top = np.random.randint(low=0, high=delta_height + 1) pad_left = np.random.randint(low=0, high=delta_width + 1) else: pad_top = delta_height // 2 pad_left = delta_width // 2 pad_bottom = delta_height - pad_top pad_right = delta_width - pad_left padding = ((pad_top, pad_bottom), (pad_left, pad_right)) return pad(image, padding, data_format=data_format, input_data_format=input_data_format) def pad(self, *args, **kwargs): logger.info("pad is deprecated and will be removed in version 4.27. Please use pad_image instead.") return self.pad_image(*args, **kwargs) def thumbnail( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any corresponding dimension of the specified size. Args: image (`np.ndarray`): The image to be resized. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to resize the image to. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use. data_format (`Optional[Union[str, ChannelDimension]]`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = size["height"], size["width"] # We always resize to the smallest of either the input or output size. height = min(input_height, output_height) width = min(input_width, output_width) if height == input_height and width == input_width: return image if input_height > input_width: width = int(input_width * height / input_height) elif input_width > input_height: height = int(input_height * width / input_width) return resize( image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resizes `image` to `(height, width)` specified by `size` using the PIL library. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" size = get_size_dict(size) shortest_edge = min(size["height"], size["width"]) output_size = get_resize_output_image_size( image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format ) resized_image = resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) return resized_image @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_thumbnail: bool = None, do_align_long_axis: bool = None, do_pad: bool = None, random_padding: bool = False, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to min(size["height"], size["width"]) with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random amont of padding on each size, up to the largest image size in the batch. Otherwise, all images are padded to the largest image size in the batch. random_padding (`bool`, *optional*, defaults to `self.random_padding`): Whether to use random padding when padding the image. If `True`, each image in the batch with be padded with a random amount of padding on each side up to the size of the largest image in the batch. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. 
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size if isinstance(size, (tuple, list)): # Previous feature extractor had size in (width, height) format size = size[::-1] size = get_size_dict(size) resample = resample if resample is not None else self.resample do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis do_pad = do_pad if do_pad is not None else self.do_pad do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg. do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_align_long_axis: images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images] if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_thumbnail: images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images] if do_pad: images = [ self.pad_image( image=image, size=size, random_padding=random_padding, input_data_format=input_data_format ) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
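# A minimal usage sketch (not from the source): run the default Donut preprocessing
# pipeline on a dummy document image and inspect the output. The random input image
# and its size are illustrative assumptions.
import numpy as np
from transformers import DonutImageProcessor

image_processor = DonutImageProcessor()  # defaults: size={"height": 2560, "width": 1920}
image = np.random.randint(0, 256, size=(800, 600, 3), dtype=np.uint8)  # fake RGB "scan"

batch = image_processor.preprocess(image, return_tensors="np")
# Resized (shortest edge), thumbnailed, padded to (2560, 1920), rescaled, normalized,
# and converted to channels-first: (batch, channels, height, width).
print(batch["pixel_values"].shape)  # (1, 3, 2560, 1920)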
class SamVisionEncoderOutput(ModelOutput):
    """
    Base class for sam vision model's outputs that also contains image embeddings obtained by applying the projection
    layer to the pooler_output.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
class SamImageSegmentationOutput(ModelOutput):
    """
    Base class for Segment-Anything model's output.

    Args:
        iou_scores (`torch.FloatTensor` of shape `(batch_size, num_masks)`):
            The IoU scores of the predicted masks.
        pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`):
            The predicted low-resolution masks, which need to be post-processed by the processor.
        vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs.
        vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    iou_scores: torch.FloatTensor = None
    pred_masks: torch.FloatTensor = None
    vision_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    vision_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    mask_decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
class SamPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, height // patch_size, width // patch_size, hidden_size)`
    to be consumed by a Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values):
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        embeddings = self.projection(pixel_values).permute(0, 2, 3, 1)
        return embeddings
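# A quick shape-check sketch (assumption: a SimpleNamespace stands in for the real
# SamVisionConfig; the values are illustrative, not from the source).
import torch
from types import SimpleNamespace

config = SimpleNamespace(image_size=64, patch_size=16, num_channels=3, hidden_size=32)
patch_embed = SamPatchEmbeddings(config)

pixel_values = torch.randn(2, 3, 64, 64)
embeddings = patch_embed(pixel_values)
print(embeddings.shape)  # torch.Size([2, 4, 4, 32]): (batch, H // patch, W // patch, hidden)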
class SamMLPBlock(nn.Module): def __init__(self, config): super().__init__() self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim) self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size) self.act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.lin1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.lin2(hidden_states) return hidden_states
class SamLayerNorm(nn.Module):
    r"""LayerNorm that supports two data formats for the ordering of the dimensions in the input: channels_last
    (default) corresponds to inputs with shape (batch_size, height, width, channels), while channels_first
    corresponds to inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError(f"Unsupported data format: {self.data_format}")
        self.normalized_shape = (normalized_shape,)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.data_format == "channels_last":
            x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            input_dtype = x.dtype
            x = x.float()
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = x.to(dtype=input_dtype)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x
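# An equivalence sketch (illustrative, not from the source): at initialization, the
# channels_first branch should match nn.LayerNorm applied to a permuted tensor.
import torch

sam_ln = SamLayerNorm(8, data_format="channels_first")
ref_ln = torch.nn.LayerNorm(8, eps=1e-6)

x = torch.randn(2, 8, 4, 4)  # (batch, channels, height, width)
got = sam_ln(x)
want = ref_ln(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)  # normalize last dim, permute back
print(torch.allclose(got, want, atol=1e-5))  # True (weight is ones, bias is zeros at init)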
class SamAttention(nn.Module):
    """
    SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
    values.
    """

    def __init__(self, config, downsample_rate=None):
        super().__init__()
        self.hidden_size = config.hidden_size

        downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate

        self.internal_dim = config.hidden_size // downsample_rate
        self.num_attention_heads = config.num_attention_heads
        if self.internal_dim % config.num_attention_heads != 0:
            raise ValueError("num_attention_heads must divide internal_dim (hidden_size // downsample_rate).")

        self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
        self.k_proj = nn.Linear(self.hidden_size, self.internal_dim)
        self.v_proj = nn.Linear(self.hidden_size, self.internal_dim)
        self.out_proj = nn.Linear(self.internal_dim, self.hidden_size)

    def _separate_heads(self, hidden_states: Tensor, num_attention_heads: int) -> Tensor:
        batch, point_batch_size, n_tokens, channel = hidden_states.shape
        c_per_head = channel // num_attention_heads
        hidden_states = hidden_states.reshape(batch * point_batch_size, n_tokens, num_attention_heads, c_per_head)
        return hidden_states.transpose(1, 2)

    def _recombine_heads(self, hidden_states: Tensor, point_batch_size: int) -> Tensor:
        batch, n_heads, n_tokens, c_per_head = hidden_states.shape
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states.reshape(batch // point_batch_size, point_batch_size, n_tokens, n_heads * c_per_head)

    def forward(self, query: Tensor, key: Tensor, value: Tensor, attention_similarity: Tensor = None) -> Tensor:
        # Input projections
        query = self.q_proj(query)
        key = self.k_proj(key)
        value = self.v_proj(value)

        point_batch_size = query.shape[1]
        # Separate into heads
        query = self._separate_heads(query, self.num_attention_heads)
        key = self._separate_heads(key, self.num_attention_heads)
        value = self._separate_heads(value, self.num_attention_heads)

        # Scaled dot-product attention
        _, _, _, c_per_head = query.shape
        attn = query @ key.permute(0, 1, 3, 2)  # batch_size * point_batch_size x N_heads x N_tokens x N_tokens
        attn = attn / (c_per_head**0.5)
        attn = torch.softmax(attn, dim=-1)

        if attention_similarity is not None:
            attn = attn + attention_similarity
            attn = torch.softmax(attn, dim=-1)

        # Get output
        out = attn @ value
        out = self._recombine_heads(out, point_batch_size)
        out = self.out_proj(out)

        return out
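# A shape walkthrough sketch (assumption: a SimpleNamespace stands in for the real
# SamMaskDecoderConfig; the values are illustrative, not from the source).
import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=64, attention_downsample_rate=2, num_attention_heads=4)
attn = SamAttention(config)  # internal_dim = 64 // 2 = 32, i.e. 8 dims per head

# Inputs are (batch, point_batch_size, n_tokens, hidden_size)
q = torch.randn(1, 3, 5, 64)
k = torch.randn(1, 3, 10, 64)
v = torch.randn(1, 3, 10, 64)
print(attn(q, k, v).shape)  # torch.Size([1, 3, 5, 64]): heads recombined, projected back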
class SamSdpaAttention(SamAttention): """ SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. Using SDPA instead of the default attention. """ def __init__(self, config, downsample_rate=None): super().__init__(config, downsample_rate) def forward(self, query: Tensor, key: Tensor, value: Tensor, attention_similarity: Tensor = None) -> Tensor: # Input projections query = self.q_proj(query) key = self.k_proj(key) value = self.v_proj(value) point_batch_size = query.shape[1] # Separate into heads query = self._separate_heads(query, self.num_attention_heads) key = self._separate_heads(key, self.num_attention_heads) value = self._separate_heads(value, self.num_attention_heads) # Scaled dot product attention attn_mask = None if attention_similarity is not None: attn_mask = attention_similarity.unsqueeze(1).expand(-1, self.num_attention_heads, -1, -1) out = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask) # Get output out = self._recombine_heads(out, point_batch_size) out = self.out_proj(out) return out
class SamTwoWayAttentionBlock(nn.Module):
    def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False):
        """
        A transformer block with four layers:
            (1) self-attention of sparse inputs
            (2) cross attention of sparse inputs -> dense inputs
            (3) mlp block on sparse inputs
            (4) cross attention of dense inputs -> sparse inputs

        Arguments:
            config (`SamMaskDecoderConfig`):
                The configuration file used to instantiate the block
            attention_downsample_rate (`int`, *optional*, defaults to 2):
                The downsample ratio of the block used to reduce the inner dim of the attention.
            skip_first_layer_pe (`bool`, *optional*, defaults to `False`):
                Whether or not to skip the addition of the query_point_embedding on the first layer.
        """
        super().__init__()

        self.hidden_size = config.hidden_size
        self.layer_norm_eps = config.layer_norm_eps

        self.self_attn = SAM_ATTENTION_CLASSES[config._attn_implementation](config, downsample_rate=1)
        self.layer_norm1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)

        self.cross_attn_token_to_image = SAM_ATTENTION_CLASSES[config._attn_implementation](
            config, downsample_rate=attention_downsample_rate
        )
        self.layer_norm2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)

        self.mlp = SamMLPBlock(config)
        self.layer_norm3 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)

        self.layer_norm4 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
        self.cross_attn_image_to_token = SAM_ATTENTION_CLASSES[config._attn_implementation](
            config, downsample_rate=attention_downsample_rate
        )

        self.skip_first_layer_pe = skip_first_layer_pe

    def forward(
        self,
        queries: Tensor,
        keys: Tensor,
        query_point_embedding: Tensor,
        key_point_embedding: Tensor,
        attention_similarity: Tensor,
        output_attentions: bool = False,
    ):
        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(query=queries, key=queries, value=queries)
        else:
            query = queries + query_point_embedding
            attn_out = self.self_attn(query=query, key=query, value=queries)
            queries = queries + attn_out
        queries = self.layer_norm1(queries)

        # Cross attention block, tokens attending to image embedding
        query = queries + query_point_embedding
        key = keys + key_point_embedding

        attn_out = self.cross_attn_token_to_image(
            query=query, key=key, value=keys, attention_similarity=attention_similarity
        )
        queries = queries + attn_out

        queries = self.layer_norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.layer_norm3(queries)

        # Cross attention block, image embedding attending to tokens
        query = queries + query_point_embedding
        key = keys + key_point_embedding

        attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries)
        keys = keys + attn_out

        keys = self.layer_norm4(keys)

        outputs = (queries, keys)

        if output_attentions:
            outputs = outputs + (attn_out,)
        else:
            outputs = outputs + (None,)

        return outputs
class SamTwoWayTransformer(nn.Module):
    def __init__(self, config: SamMaskDecoderConfig):
        super().__init__()
        self.config = config

        self.num_hidden_layers = config.num_hidden_layers
        self.layers = nn.ModuleList()

        for i in range(self.num_hidden_layers):
            self.layers.append(SamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0)))

        self.final_attn_token_to_image = SAM_ATTENTION_CLASSES[config._attn_implementation](config)
        self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)

    def forward(
        self,
        point_embeddings: Tensor,
        image_embeddings: Tensor,
        image_positional_embeddings: Tensor,
        attention_similarity: Tensor,
        target_embedding=None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        all_attentions = ()

        if image_embeddings is None:
            raise ValueError("You have to specify an image_embedding")

        image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
        image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)

        # Prepare queries
        queries = point_embeddings
        keys = image_embeddings

        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            if target_embedding is not None:
                queries += target_embedding

            queries, keys, attention_outputs = layer(
                queries=queries,
                keys=keys,
                query_point_embedding=point_embeddings,
                key_point_embedding=image_positional_embeddings,
                attention_similarity=attention_similarity,
                output_attentions=output_attentions,
            )

            if output_attentions:
                all_attentions = all_attentions + (attention_outputs,)

        # Apply the final attention layer from the points to the image
        query = queries + point_embeddings
        key = keys + image_positional_embeddings

        attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys)

        queries = queries + attn_out
        queries = self.layer_norm_final_attn(queries)
        return queries, keys, all_attentions
class SamFeedForward(nn.Module): def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False ): super().__init__() self.num_layers = num_layers self.activation = nn.ReLU() self.proj_in = nn.Linear(input_dim, hidden_dim) self.proj_out = nn.Linear(hidden_dim, output_dim) self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)]) self.sigmoid_output = sigmoid_output def forward(self, hidden_states): hidden_states = self.proj_in(hidden_states) hidden_states = self.activation(hidden_states) for layer in self.layers: hidden_states = self.activation(layer(hidden_states)) hidden_states = self.proj_out(hidden_states) if self.sigmoid_output: hidden_states = F.sigmoid(hidden_states) return hidden_states
class SamMaskDecoder(nn.Module):
    def __init__(self, config: SamMaskDecoderConfig):
        super().__init__()
        self.config = config

        self.hidden_size = config.hidden_size
        self.num_multimask_outputs = config.num_multimask_outputs
        self.num_mask_tokens = config.num_multimask_outputs + 1

        self.iou_token = nn.Embedding(1, self.hidden_size)
        self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size)

        self.transformer = SamTwoWayTransformer(config)

        # should we create a new class for this?
        self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
        self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
        self.upscale_layer_norm = SamLayerNorm(self.hidden_size // 4, data_format="channels_first")
        self.activation = nn.GELU()

        mlps_list = []
        for _ in range(self.num_mask_tokens):
            mlps_list += [SamFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)]
        self.output_hypernetworks_mlps = nn.ModuleList(mlps_list)

        self.iou_prediction_head = SamFeedForward(
            self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth
        )

    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_positional_embeddings: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
        output_attentions: Optional[bool] = None,
        attention_similarity: torch.Tensor = None,
        target_embedding: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.

        Args:
            image_embeddings (`torch.Tensor`):
                The embeddings from the image encoder.
            image_positional_embeddings (`torch.Tensor`):
                Positional encoding with the shape of image_embeddings.
            sparse_prompt_embeddings (`torch.Tensor`):
                The embeddings of the points and boxes.
            dense_prompt_embeddings (`torch.Tensor`):
                The embeddings of the mask inputs.
            multimask_output (bool):
                Whether to return multiple masks or a single mask.
            output_attentions (bool, *optional*):
                Whether or not to return the attentions tensors of all attention layers.
""" batch_size, num_channels, height, width = image_embeddings.shape point_batch_size = sparse_prompt_embeddings.shape[1] # Concatenate output tokens output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1) if sparse_prompt_embeddings.sum().item() != 0: tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2) else: tokens = output_tokens point_embeddings = tokens.to(self.iou_token.weight.dtype) # Expand per-image data in batch direction to be per-point image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0) image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0) # Run the transformer, image_positional_embedding are consumed point_embedding, image_embeddings, attentions = self.transformer( point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, output_attentions=output_attentions, ) iou_token_out = point_embedding[:, :, 0, :] mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :] # Upscale mask embeddings and predict masks using the mask tokens image_embeddings = image_embeddings.transpose(2, 3).reshape( batch_size * point_batch_size, num_channels, height, width ) upscaled_embedding = self.upscale_conv1(image_embeddings) upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) hyper_in_list = [] for i in range(self.num_mask_tokens): current_mlp = self.output_hypernetworks_mlps[i] hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] hyper_in = torch.stack(hyper_in_list, dim=2) _, num_channels, height, width = upscaled_embedding.shape upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width) masks = (hyper_in @ upscaled_embedding).reshape(batch_size, point_batch_size, -1, height, width) # Generate mask quality predictions iou_pred = self.iou_prediction_head(iou_token_out) # Select the correct mask or masks for output if multimask_output: mask_slice = slice(1, None) else: mask_slice = slice(0, 1) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] outputs = (masks, iou_pred) if output_attentions: outputs = outputs + (attentions,) else: outputs = outputs + (None,) return outputs
class SamPositionalEmbedding(nn.Module): def __init__(self, config): super().__init__() self.scale = config.hidden_size // 2 self.register_buffer("positional_embedding", self.scale * torch.randn((2, config.num_pos_feats))) def forward(self, input_coords, input_shape=None): """Positionally encode points that are normalized to [0,1].""" coordinates = input_coords.clone() if input_shape is not None: coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1] coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0] # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape coordinates = 2 * coordinates - 1 coordinates = coordinates.to(self.positional_embedding.dtype) coordinates = coordinates @ self.positional_embedding coordinates = 2 * np.pi * coordinates # outputs d_1 x ... x d_n x channel shape return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1)
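# A minimal sketch of the random-Fourier positional encoding (assumption: a
# SimpleNamespace stands in for the real config; the values are illustrative).
import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=16, num_pos_feats=8)
pos_embed = SamPositionalEmbedding(config)

# (batch, point_batch, n_points, 2) pixel coordinates, normalized via input_shape
coords = torch.tensor([[[[512.0, 256.0]]]])
out = pos_embed(coords, input_shape=(1024, 1024))
print(out.shape)  # torch.Size([1, 1, 1, 16]): [sin, cos] of 8 projected frequencies each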
class SamMaskEmbedding(nn.Module): def __init__(self, config: SamPromptEncoderConfig): super().__init__() self.mask_input_channels = config.mask_input_channels // 4 self.activation = ACT2FN[config.hidden_act] self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2) self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2) self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1) self.layer_norm1 = SamLayerNorm( self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first" ) self.layer_norm2 = SamLayerNorm( self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first" ) def forward(self, masks): hidden_states = self.conv1(masks) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = self.activation(hidden_states) dense_embeddings = self.conv3(hidden_states) return dense_embeddings
class SamPromptEncoder(nn.Module):
    def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding):
        super().__init__()
        self.shared_embedding = shared_patch_embedding
        self.mask_embed = SamMaskEmbedding(config)
        self.no_mask_embed = nn.Embedding(1, config.hidden_size)

        self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size)
        self.input_image_size = config.image_size

        self.point_embed = nn.ModuleList(
            [nn.Embedding(1, config.hidden_size) for i in range(config.num_point_embeddings)]
        )
        self.hidden_size = config.hidden_size
        self.not_a_point_embed = nn.Embedding(1, config.hidden_size)

    def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
        """Embeds point prompts."""
        points = points + 0.5  # Shift to center of pixel
        if pad:
            target_point_shape = (points.shape[0], points.shape[1], 1, points.shape[-1])
            target_labels_shape = (points.shape[0], points.shape[1], 1)
            padding_point = torch.zeros(target_point_shape, device=points.device)
            padding_label = -torch.ones(target_labels_shape, device=labels.device)
            points = torch.cat([points, padding_point], dim=2)
            labels = torch.cat([labels, padding_label], dim=2)
        input_shape = (self.input_image_size, self.input_image_size)
        point_embedding = self.shared_embedding(points, input_shape)

        # torch.where and expanding the labels tensor is required by the ONNX export
        point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)

        # This is required for the ONNX export. The dtype and device need to be explicitly
        # specified as otherwise torch.onnx.export interprets as double
        point_embedding = torch.where(
            labels[..., None] != -10,
            point_embedding,
            torch.tensor(0.0, dtype=point_embedding.dtype, device=point_embedding.device),
        )

        point_embedding = torch.where(
            (labels == 0)[:, :, :, None],
            point_embedding + self.point_embed[0].weight[None, None, :, :],
            point_embedding,
        )

        point_embedding = torch.where(
            (labels == 1)[:, :, :, None],
            point_embedding + self.point_embed[1].weight[None, None, :, :],
            point_embedding,
        )

        return point_embedding

    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts."""
        boxes = boxes + 0.5  # Shift to center of pixel
        batch_size, nb_boxes = boxes.shape[:2]
        coords = boxes.reshape(batch_size, nb_boxes, 2, 2)
        input_shape = (self.input_image_size, self.input_image_size)
        corner_embedding = self.shared_embedding(coords, input_shape)
        corner_embedding[:, :, 0, :] += self.point_embed[2].weight
        corner_embedding[:, :, 1, :] += self.point_embed[3].weight
        return corner_embedding

    def forward(
        self,
        input_points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        input_labels: Optional[torch.Tensor],
        input_boxes: Optional[torch.Tensor],
        input_masks: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Embeds different types of prompts, returning both sparse and dense embeddings.

        Args:
            input_points (`torch.Tensor`, *optional*):
                point coordinates and labels to embed.
            input_boxes (`torch.Tensor`, *optional*):
                boxes to embed
            input_masks (`torch.Tensor`, *optional*):
                masks to embed
        """
        sparse_embeddings = None
        batch_size = 1
        target_device = self.shared_embedding.positional_embedding.device
        if input_points is not None:
            batch_size, point_batch_size = input_points.shape[:2]
            if input_labels is None:
                raise ValueError("If points are provided, labels must also be provided.")
            point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
            sparse_embeddings = point_embeddings
        if input_boxes is not None:
            batch_size = input_boxes.shape[0]
            box_embeddings = self._embed_boxes(input_boxes)
            if sparse_embeddings is None:
                sparse_embeddings = box_embeddings
            else:
                sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
        if input_masks is not None:
            dense_embeddings = self.mask_embed(input_masks)
        else:
            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
                batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
            )

        if sparse_embeddings is None:
            sparse_embeddings = torch.zeros((batch_size, 1, 1, self.hidden_size), device=target_device)

        return sparse_embeddings, dense_embeddings
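# An end-to-end sketch (assumptions: default SamPromptEncoderConfig values and a
# shared SamPositionalEmbedding whose output dim (2 * num_pos_feats) matches the
# prompt hidden size; not taken from the source).
import torch
from types import SimpleNamespace
from transformers import SamPromptEncoderConfig

config = SamPromptEncoderConfig()  # hidden_size=256, image_size=1024, image_embedding_size=64
shared = SamPositionalEmbedding(SimpleNamespace(hidden_size=256, num_pos_feats=128))
prompt_encoder = SamPromptEncoder(config, shared)

points = torch.rand(1, 2, 3, 2) * config.image_size  # (batch, point_batch, n_points, 2)
labels = torch.ones(1, 2, 3)                          # 1 = foreground point
sparse, dense = prompt_encoder(points, labels, input_boxes=None, input_masks=None)
print(sparse.shape, dense.shape)  # (1, 2, 4, 256) (1, 256, 64, 64) — one padding point added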
class SamVisionAttention(nn.Module): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config, window_size): super().__init__() input_size = ( (config.image_size // config.patch_size, config.image_size // config.patch_size) if window_size == 0 else (window_size, window_size) ) self.num_attention_heads = config.num_attention_heads head_dim = config.hidden_size // config.num_attention_heads self.scale = head_dim**-0.5 self.dropout = config.attention_dropout self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.use_rel_pos = config.use_rel_pos if self.use_rel_pos: if input_size is None: raise ValueError("Input size must be provided if using relative positional encoding.") # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of the query. k_size (int): size of key k. rel_pos (`torch.Tensor`): relative position embeddings (L, channel). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos. rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def add_decomposed_rel_pos( self, attn: torch.Tensor, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> torch.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: attn (`torch.Tensor`): attention map. query (`torch.Tensor`): query q in the attention layer with shape (batch_size, query_height * query_width, channel). rel_pos_h (`torch.Tensor`): relative position embeddings (Lh, channel) for height axis. rel_pos_w (`torch.Tensor`): relative position embeddings (Lw, channel) for width axis. q_size (tuple): spatial sequence size of query q with (query_height, query_width). k_size (tuple): spatial sequence size of key k with (key_height, key_width). Returns: attn (`torch.Tensor`): attention map with added relative positional embeddings. 
""" query_height, query_width = q_size key_height, key_width = k_size relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) batch_size, _, dim = query.shape reshaped_query = query.reshape(batch_size, query_height, query_width, dim) rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) attn = attn.reshape(batch_size, query_height, query_width, key_height, key_width) attn = attn + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] attn = attn.reshape(batch_size, query_height * query_width, key_height * key_width) return attn def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor: batch_size, height, width, _ = hidden_states.shape # qkv with shape (3, batch_size, nHead, height * width, channel) qkv = ( self.qkv(hidden_states) .reshape(batch_size, height * width, 3, self.num_attention_heads, -1) .permute(2, 0, 3, 1, 4) ) # q, k, v with shape (batch_size * nHead, height * width, channel) query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0) attn_weights = (query * self.scale) @ key.transpose(-2, -1) if self.use_rel_pos: attn_weights = self.add_decomposed_rel_pos( attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype) attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1) attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1) attn_output = self.proj(attn_output) if output_attentions: outputs = (attn_output, attn_weights) else: outputs = (attn_output, None) return outputs
class SamVisionSdpaAttention(SamVisionAttention):
    """
    Multi-head Attention block with relative position embeddings.
    Using SDPA instead of the default attention.
    """

    def __init__(self, config, window_size):
        super().__init__(config, window_size)

    def add_decomposed_rel_pos(
        self,
        query: torch.Tensor,
        rel_pos_h: torch.Tensor,
        rel_pos_w: torch.Tensor,
        q_size: Tuple[int, int],
        k_size: Tuple[int, int],
    ) -> torch.Tensor:
        """
        Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
        https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
        This method is reimplemented to follow the implementation in:
        https://github.com/pytorch-labs/segment-anything-fast/blob/main/segment_anything_fast/modeling/image_encoder.py # noqa B950
        This implementation is more memory efficient when using SDPA in the forward method.

        Args:
            query (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
            rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
            rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
            q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
            k_size (Tuple): spatial sequence size of key k with (k_h, k_w).

        Returns:
            Decomposed relative position biases (rel_h, rel_w) to be added to the attention map.
        """
        query_height, query_width = q_size
        key_height, key_width = k_size
        relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h)
        relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w)

        batch_size, _, dim = query.shape
        reshaped_query = query.reshape(batch_size, query_height, query_width, dim)
        rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height)
        rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width)
        rel_h = rel_h.unsqueeze(-1)
        rel_w = rel_w.unsqueeze(-2)
        rel_h = rel_h.reshape(batch_size, query_height * query_width, key_height, 1)
        rel_w = rel_w.reshape(batch_size, query_height * query_width, 1, key_width)

        return rel_h, rel_w

    def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor:
        batch_size, height, width, _ = hidden_states.shape
        # qkv with shape (3, B, nHead, H * W, C)
        qkv = (
            self.qkv(hidden_states)
            .reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
            .permute(2, 0, 3, 1, 4)
        )
        # q, k, v with shape (B * nHead, H * W, C)
        query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0)

        rel_h, rel_w = None, None
        attn_bias = None  # stays None when relative position embeddings are disabled
        if self.use_rel_pos:
            rel_h, rel_w = self.add_decomposed_rel_pos(
                query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
            )

        query = query.view(batch_size, self.num_attention_heads, height * width, -1)
        key = key.view(batch_size, self.num_attention_heads, height * width, -1)
        value = value.view(batch_size, self.num_attention_heads, height * width, -1)

        if self.use_rel_pos:
            rel_h = rel_h.view(batch_size, self.num_attention_heads, rel_h.size(1), rel_h.size(2), rel_h.size(3))
            rel_w = rel_w.view(batch_size, self.num_attention_heads, rel_w.size(1), rel_w.size(2), rel_w.size(3))
            attn_bias = (rel_h + rel_w).view(
                batch_size, self.num_attention_heads, rel_h.size(2), rel_h.size(3) * rel_w.size(4)
            )
            attn_output = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attn_bias)
        else:
            attn_output = torch.nn.functional.scaled_dot_product_attention(query, key, value)

        attn_output = (
            attn_output.view(batch_size, self.num_attention_heads, height, width, -1)
            .permute(0, 2, 3, 1, 4)
.reshape(batch_size, height, width, -1) ) attn_output = self.proj(attn_output) if output_attentions: # For output_attentions, calculate the attention weights attn_weights = (query @ key.transpose(-2, -1)) * self.scale if attn_bias is not None: attn_weights = attn_weights + attn_bias attn_weights = F.softmax(attn_weights, dim=-1) outputs = (attn_output, attn_weights) else: outputs = (attn_output, None) return outputs
class SamVisionLayer(nn.Module):
    def __init__(self, config, window_size):
        super().__init__()
        self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attn = SAM_VISION_ATTENTION_CLASSES[config._attn_implementation](config, window_size)
        self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = SamMLPBlock(config)
        self.window_size = window_size

    def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
        """
        Partition into non-overlapping windows with padding if needed.

        Args:
            hidden_states (tensor): input tokens with [batch_size, height, width, channel].
            window_size (int): window size.

        Returns:
            windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].
            (pad_height, pad_width): padded height and width before partition
        """
        batch_size, height, width, channel = hidden_states.shape

        pad_h = (window_size - height % window_size) % window_size
        pad_w = (window_size - width % window_size) % window_size
        hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))
        pad_height, pad_width = height + pad_h, width + pad_w

        hidden_states = hidden_states.reshape(
            batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel
        )
        windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel)
        return windows, (pad_height, pad_width)

    def window_unpartition(
        self, windows: torch.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int]
    ) -> torch.Tensor:
        """
        Window unpartition into original sequences and removing padding.

        Args:
            windows (tensor): input tokens with [batch_size * num_windows, window_size, window_size, channel].
            window_size (int): window size.
            padding_shape (Tuple): padded height and width (pad_height, pad_width).
            original_shape (Tuple): original height and width (height, width) before padding.

        Returns:
            hidden_states: unpartitioned sequences with [batch_size, height, width, channel].
        """
        pad_height, pad_width = padding_shape
        height, width = original_shape
        batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size)
        hidden_states = windows.reshape(
            batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1
        )
        hidden_states = (
            hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1)
        )

        hidden_states = hidden_states[:, :height, :width, :].contiguous()
        return hidden_states

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        # Window partition
        if self.window_size > 0:
            height, width = hidden_states.shape[1], hidden_states.shape[2]
            hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size)

        hidden_states, attn_weights = self.attn(
            hidden_states=hidden_states,
            output_attentions=output_attentions,
        )
        # Reverse window partition
        if self.window_size > 0:
            hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width))

        hidden_states = residual + hidden_states
        layernorm_output = self.layer_norm2(hidden_states)
        hidden_states = hidden_states + self.mlp(layernorm_output)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs
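# A round-trip sketch (illustrative, standalone restatement of the same reshaping
# math, not the module itself): partitioning with padding produces one window per
# window_size x window_size tile of the padded feature map.
import torch
import torch.nn.functional as F

def partition(x, ws):
    b, h, w, c = x.shape
    pad_h, pad_w = (ws - h % ws) % ws, (ws - w % ws) % ws
    x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    hp, wp = h + pad_h, w + pad_w
    x = x.reshape(b, hp // ws, ws, wp // ws, ws, c)
    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, ws, ws, c), (hp, wp)

x = torch.randn(2, 10, 14, 8)  # (batch, height, width, channel)
windows, (hp, wp) = partition(x, 7)
print(windows.shape, (hp, wp))  # torch.Size([8, 7, 7, 8]) (14, 14): 2 batches x 4 windows each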
class SamVisionNeck(nn.Module): def __init__(self, config: SamVisionConfig): super().__init__() self.config = config self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False) self.layer_norm1 = SamLayerNorm(config.output_channels, data_format="channels_first") self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False) self.layer_norm2 = SamLayerNorm(config.output_channels, data_format="channels_first") def forward(self, hidden_states): hidden_states = hidden_states.permute(0, 3, 1, 2) hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) return hidden_states
class SamVisionEncoder(nn.Module): def __init__(self, config: SamVisionConfig): super().__init__() self.config = config self.image_size = config.image_size self.patch_embed = SamPatchEmbeddings(config) self.pos_embed = None if config.use_abs_pos: # Initialize absolute positional embedding with pretrain image size. self.pos_embed = nn.Parameter( torch.zeros( 1, config.image_size // config.patch_size, config.image_size // config.patch_size, config.hidden_size, ) ) self.layers = nn.ModuleList() for i in range(config.num_hidden_layers): layer = SamVisionLayer( config, window_size=config.window_size if i not in config.global_attn_indexes else 0, ) self.layers.append(layer) self.neck = SamVisionNeck(config) self.gradient_checkpointing = False def get_input_embeddings(self): return self.patch_embed def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SamVisionEncoderOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.patch_embed(pixel_values) if self.pos_embed is not None: hidden_states = hidden_states + self.pos_embed all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, ) else: layer_outputs = layer_module(hidden_states, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.neck(hidden_states) if not return_dict: outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (all_hidden_states,) if output_attentions: outputs = outputs + (all_self_attentions,) return outputs return SamVisionEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, )
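# A minimal forward-pass sketch (assumption: a deliberately tiny SamVisionConfig so
# it runs quickly on CPU; the values are illustrative, not from the source).
import torch
from transformers import SamVisionConfig

config = SamVisionConfig(
    image_size=64, patch_size=16, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=2, mlp_dim=64, output_channels=16,
    window_size=2, global_attn_indexes=[1],
)
encoder = SamVisionEncoder(config)
out = encoder(torch.randn(1, 3, 64, 64), return_dict=True)
print(out.last_hidden_state.shape)  # torch.Size([1, 16, 4, 4]): channels-first after the neck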
class SamPreTrainedModel(PreTrainedModel): config_class = SamConfig base_model_prefix = "sam" main_input_name = "pixel_values" _no_split_modules = ["SamVisionAttention"] supports_gradient_checkpointing = True _supports_sdpa = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
class_definition
53,096
53,861
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_sam.py
null
4,157
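A standalone sketch of the `_init_weights` pattern above, using `std=0.02` as a stand-in for `config.initializer_range` (an assumption; the real value comes from `SamConfig`):

```python
import torch.nn as nn

def init_weights(module: nn.Module, std: float = 0.02) -> None:
    # Linear/Conv weights drawn from N(0, std); biases and padding rows zeroed
    if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()

net = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.Flatten(), nn.Linear(8 * 30 * 30, 4))
net.apply(init_weights)  # Module.apply walks every submodule, as PreTrainedModel does
```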
class SamModel(SamPreTrainedModel): _tied_weights_keys = ["prompt_encoder.shared_embedding.positional_embedding"] def __init__(self, config): super().__init__(config) self.shared_image_embedding = SamPositionalEmbedding(config.vision_config) self.vision_encoder = SamVisionEncoder(config.vision_config) self.prompt_encoder = SamPromptEncoder(config.prompt_encoder_config, self.shared_image_embedding) self.mask_decoder = SamMaskDecoder(config.mask_decoder_config) self.post_init() def get_input_embeddings(self): return self.vision_encoder.get_input_embeddings() def get_image_wide_positional_embeddings(self): size = self.config.prompt_encoder_config.image_embedding_size target_device = self.shared_image_embedding.positional_embedding.device target_dtype = self.shared_image_embedding.positional_embedding.dtype grid = torch.ones((size, size), device=target_device, dtype=target_dtype) y_embed = grid.cumsum(dim=0) - 0.5 x_embed = grid.cumsum(dim=1) - 0.5 y_embed = y_embed / size x_embed = x_embed / size positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1)) return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width @torch.no_grad() def get_image_embeddings( self, pixel_values, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns the image embeddings by passing the pixel values through the vision encoder. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Input pixel values output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ vision_output = self.vision_encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeddings = vision_output[0] return image_embeddings @torch.no_grad() def get_prompt_embeddings( self, input_points: Optional[torch.FloatTensor] = None, input_labels: Optional[torch.LongTensor] = None, input_boxes: Optional[torch.FloatTensor] = None, input_masks: Optional[torch.LongTensor] = None, ): r""" Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. Args: input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`): Optional input points for the prompt encoder. The padding of the points is automatically done by the processor. `point_batch_size` refers to the number of masks that we want the model to predict per point. The model will output `point_batch_size` times 3 masks in total. input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`): Optional input labels for the prompt encoder. The padding of the labels is automatically done by the processor, or can be fed by the user. input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`): Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the processor. Users can also pass the input boxes manually. input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`): Optional input masks for the prompt encoder. 
""" prompt_output = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) return prompt_output @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, input_points: Optional[torch.FloatTensor] = None, input_labels: Optional[torch.LongTensor] = None, input_boxes: Optional[torch.FloatTensor] = None, input_masks: Optional[torch.LongTensor] = None, image_embeddings: Optional[torch.FloatTensor] = None, multimask_output: bool = True, attention_similarity: Optional[torch.FloatTensor] = None, target_embedding: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> List[Dict[str, torch.Tensor]]: r""" Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoModel, AutoProcessor >>> model = AutoModel.from_pretrained("facebook/sam-vit-base") >>> processor = AutoProcessor.from_pretrained("facebook/sam-vit-base") >>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png" >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") >>> input_points = [[[400, 650]]] # 2D location of a window on the car >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt") >>> # Get segmentation mask >>> outputs = model(**inputs) >>> # Postprocess masks >>> masks = processor.post_process_masks( ... outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"] ... ) ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None and image_embeddings is None: raise ValueError("Either pixel_values or image_embeddings must be provided.") if pixel_values is not None and image_embeddings is not None: raise ValueError("Only one of pixel_values and image_embeddings can be provided.") if input_points is not None and len(input_points.shape) != 4: raise ValueError( "The input_points must be a 4D tensor, of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.", " got {}.".format(input_points.shape), ) if input_boxes is not None and len(input_boxes.shape) != 3: raise ValueError( "The input_boxes must be a 3D tensor, of shape `batch_size`, `nb_boxes`, `4`.", " got {}.".format(input_boxes.shape), ) if input_points is not None and input_boxes is not None: point_batch_size = input_points.shape[1] box_batch_size = input_boxes.shape[1] if point_batch_size != box_batch_size: raise ValueError( "You should provide as many bounding boxes as input points per box. 
Got {} and {}.".format( point_batch_size, box_batch_size ) ) image_positional_embeddings = self.get_image_wide_positional_embeddings() # repeat with batch size batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0] image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1) vision_attentions = None vision_hidden_states = None if pixel_values is not None: vision_outputs = self.vision_encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeddings = vision_outputs[0] if output_hidden_states: vision_hidden_states = vision_outputs[1] if output_attentions: vision_attentions = vision_outputs[-1] if input_points is not None and input_labels is None: input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device) if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]: raise ValueError( "The batch size of the image embeddings and the input points must be the same. ", "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]), " If you want to pass multiple points for the same image, make sure that you passed ", " input_points of shape (batch_size, point_batch_size, num_points_per_image, 2) and ", " input_labels of shape (batch_size, point_batch_size, num_points_per_image)", ) sparse_embeddings, dense_embeddings = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder( image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, attention_similarity=attention_similarity, target_embedding=target_embedding, output_attentions=output_attentions, ) if not return_dict: output = (iou_predictions, low_res_masks) if output_hidden_states: output = output + (vision_hidden_states,) if output_attentions: output = output + (vision_attentions, mask_decoder_attentions) return output return SamImageSegmentationOutput( iou_scores=iou_predictions, pred_masks=low_res_masks, vision_hidden_states=vision_hidden_states, vision_attentions=vision_attentions, mask_decoder_attentions=mask_decoder_attentions, )
class_definition
60,247
71,276
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_sam.py
null
4,158
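A hedged usage sketch of the prompt-reuse pattern `SamModel` enables: because `forward` accepts precomputed `image_embeddings` in place of `pixel_values`, the expensive vision encoder can run once per image while the prompts vary. Assumes the public `facebook/sam-vit-base` checkpoint and a hypothetical local `car.png`:

```python
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

raw_image = Image.open("car.png").convert("RGB")  # hypothetical local image
inputs = processor(images=raw_image, input_points=[[[400, 650]]], return_tensors="pt")

with torch.no_grad():
    # Stage 1: run the vision encoder once
    image_embeddings = model.get_image_embeddings(inputs["pixel_values"])
    # Stage 2: re-prompt cheaply against the cached embeddings
    outputs = model(input_points=inputs["input_points"], image_embeddings=image_embeddings)

print(outputs.iou_scores.shape)  # (1, 1, 3): three candidate masks with multimask_output=True
```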
class SamImagesKwargs(ImagesKwargs): segmentation_maps: Optional[ImageInput] input_points: Optional[List[List[float]]] input_labels: Optional[List[List[int]]] input_boxes: Optional[List[List[List[float]]]] point_pad_value: Optional[int]
class_definition
1,105
1,361
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/processing_sam.py
null
4,159
class SamProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: SamImagesKwargs _defaults = { "images_kwargs": { "point_pad_value": -10, } }
class_definition
1,364
1,552
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/processing_sam.py
null
4,160
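A toy illustration of the precedence these kwargs classes encode: `_defaults` supplies `point_pad_value=-10`, and anything the caller passes wins. This mimics the merge behavior only; it is not the actual `_merge_kwargs` implementation:

```python
defaults = {"images_kwargs": {"point_pad_value": -10}}  # mirrors SamProcessorKwargs._defaults
caller_kwargs = {"point_pad_value": -5}

merged = {**defaults["images_kwargs"], **caller_kwargs}  # caller overrides defaults
print(merged["point_pad_value"])  # -5; with no caller override it would be -10
```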
class SamProcessor(ProcessorMixin): r""" Constructs a SAM processor which wraps a SAM image processor and a 2D points & bounding boxes processor into a single processor. [`SamProcessor`] offers all the functionalities of [`SamImageProcessor`]. See the docstring of [`~SamImageProcessor.__call__`] for more information. Args: image_processor (`SamImageProcessor`): An instance of [`SamImageProcessor`]. The image processor is a required input. """ attributes = ["image_processor"] image_processor_class = "SamImageProcessor" # For backward compatibility. See transformers.processing_utils.ProcessorMixin.prepare_and_validate_optional_call_args for more details. optional_call_args = [ "segmentation_maps", "input_points", "input_labels", "input_boxes", ] def __init__(self, image_processor): super().__init__(image_processor) self.target_size = self.image_processor.size["longest_edge"] def __call__( self, images: Optional[ImageInput] = None, # The following is to capture `segmentation_maps`, `input_points`, `input_labels` and `input_boxes` # arguments that may be passed as a positional argument. # See transformers.processing_utils.ProcessorMixin.prepare_and_validate_optional_call_args for more details, # or this conversation for more context: # https://github.com/huggingface/transformers/pull/32544#discussion_r1720208116 # This behavior is only needed for backward compatibility and will be removed in future versions. *args, # to be deprecated text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, audio: Optional[AudioInput] = None, video: Optional[VideoInput] = None, **kwargs, ) -> BatchEncoding: """ This method uses the [`SamImageProcessor.__call__`] method to prepare image(s) for the model. It also prepares 2D points and bounding boxes for the model if they are provided. 
""" output_kwargs = self._merge_kwargs( SamProcessorKwargs, tokenizer_init_kwargs={}, **kwargs, **self.prepare_and_validate_optional_call_args(*args), ) input_points = output_kwargs["images_kwargs"].pop("input_points", None) input_labels = output_kwargs["images_kwargs"].pop("input_labels", None) input_boxes = output_kwargs["images_kwargs"].pop("input_boxes", None) encoding_image_processor = self.image_processor( images, **output_kwargs["images_kwargs"], ) # pop arguments that are not used in the forward pass but are used nevertheless original_sizes = encoding_image_processor["original_sizes"] if hasattr(original_sizes, "numpy"): # Checks if Torch or TF tensor original_sizes = original_sizes.numpy() input_points, input_labels, input_boxes = self._check_and_preprocess_points( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, ) encoding_image_processor = self._normalize_and_convert( encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=output_kwargs["common_kwargs"].get("return_tensors"), point_pad_value=output_kwargs["images_kwargs"].get("point_pad_value"), ) return encoding_image_processor def _normalize_and_convert( self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt", point_pad_value=-10, ): if input_points is not None: if len(original_sizes) != len(input_points): input_points = [ self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points ] else: input_points = [ self._normalize_coordinates(self.target_size, point, original_size) for point, original_size in zip(input_points, original_sizes) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points): if input_labels is not None: input_points, input_labels = self._pad_points_and_labels( input_points, input_labels, point_pad_value ) input_points = np.array(input_points) if input_labels is not None: input_labels = np.array(input_labels) if input_boxes is not None: if len(original_sizes) != len(input_boxes): input_boxes = [ self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True) for box in input_boxes ] else: input_boxes = [ self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True) for box, original_size in zip(input_boxes, original_sizes) ] input_boxes = np.array(input_boxes) if input_boxes is not None: if return_tensors == "pt": input_boxes = torch.from_numpy(input_boxes) # boxes batch size of 1 by default input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes elif return_tensors == "tf": input_boxes = tf.convert_to_tensor(input_boxes) # boxes batch size of 1 by default input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes encoding_image_processor.update({"input_boxes": input_boxes}) if input_points is not None: if return_tensors == "pt": input_points = torch.from_numpy(input_points) # point batch size of 1 by default input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points elif return_tensors == "tf": input_points = tf.convert_to_tensor(input_points) # point batch size of 1 by default input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points encoding_image_processor.update({"input_points": input_points}) if input_labels is not None: if return_tensors == "pt": 
input_labels = torch.from_numpy(input_labels) # point batch size of 1 by default input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels elif return_tensors == "tf": input_labels = tf.convert_to_tensor(input_labels) # point batch size of 1 by default input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels encoding_image_processor.update({"input_labels": input_labels}) return encoding_image_processor def _pad_points_and_labels(self, input_points, input_labels, point_pad_value): r""" The method pads the 2D points and labels to the maximum number of points in the batch. """ expected_nb_points = max([point.shape[0] for point in input_points]) processed_input_points = [] for i, point in enumerate(input_points): if point.shape[0] != expected_nb_points: point = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2)) + point_pad_value], axis=0 ) input_labels[i] = np.append(input_labels[i], [point_pad_value]) processed_input_points.append(point) input_points = processed_input_points return input_points, input_labels def _normalize_coordinates( self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False ) -> np.ndarray: """ Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format. """ old_h, old_w = original_size new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size) coords = deepcopy(coords).astype(float) if is_bounding_box: coords = coords.reshape(-1, 2, 2) coords[..., 0] = coords[..., 0] * (new_w / old_w) coords[..., 1] = coords[..., 1] * (new_h / old_h) if is_bounding_box: coords = coords.reshape(-1, 4) return coords def _check_and_preprocess_points( self, input_points=None, input_labels=None, input_boxes=None, ): r""" Checks and preprocesses the 2D points, labels and bounding boxes. It checks that the inputs are valid and, if they are, converts the coordinates of the points and bounding boxes. If a user directly passes a `torch.Tensor`, it is converted to a `numpy.ndarray` and then to a `list`. 
""" if input_points is not None: if hasattr(input_points, "numpy"): # Checks for TF or Torch tensor input_points = input_points.numpy().tolist() if not isinstance(input_points, list) or not isinstance(input_points[0], list): raise ValueError("Input points must be a list of lists of floating point numbers.") input_points = [np.array(input_point) for input_point in input_points] else: input_points = None if input_labels is not None: if hasattr(input_labels, "numpy"): input_labels = input_labels.numpy().tolist() if not isinstance(input_labels, list) or not isinstance(input_labels[0], list): raise ValueError("Input labels must be a list of lists of integers.") input_labels = [np.array(label) for label in input_labels] else: input_labels = None if input_boxes is not None: if hasattr(input_boxes, "numpy"): input_boxes = input_boxes.numpy().tolist() if ( not isinstance(input_boxes, list) or not isinstance(input_boxes[0], list) or not isinstance(input_boxes[0][0], list) ): raise ValueError("Input boxes must be a list of lists of lists of floating point numbers.") input_boxes = [np.array(box).astype(np.float32) for box in input_boxes] else: input_boxes = None return input_points, input_labels, input_boxes @property def model_input_names(self): image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(image_processor_input_names)) def post_process_masks(self, *args, **kwargs): return self.image_processor.post_process_masks(*args, **kwargs)
class_definition
1,555
12,900
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/processing_sam.py
null
4,161
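A pure-NumPy sketch of the coordinate math behind `_normalize_coordinates` and `_get_preprocess_shape`: prompt coordinates are rescaled by `new_size / old_size` per axis after the longest image edge is resized to 1024. The example numbers are made up:

```python
import numpy as np

def get_preprocess_shape(old_h: int, old_w: int, longest_edge: int = 1024):
    scale = longest_edge / max(old_h, old_w)
    return int(old_h * scale + 0.5), int(old_w * scale + 0.5)

old_h, old_w = 1200, 1800
new_h, new_w = get_preprocess_shape(old_h, old_w)       # (683, 1024)

point = np.array([900.0, 600.0])                         # (x, y) in original pixels
scaled = point * np.array([new_w / old_w, new_h / old_h])
print((new_h, new_w), scaled)                            # (683, 1024) [512.  341.5]
```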
class SamImageProcessor(BaseImageProcessor): r""" Constructs a SAM image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"longest_edge": 1024}`): Size of the output image after resizing. Resizes the longest edge of the image to match `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `size` parameter in the `preprocess` method. mask_size (`dict`, *optional*, defaults to `{"longest_edge": 256}`): Size of the output segmentation map after resizing. Resizes the longest edge of the image to match `mask_size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `mask_size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to the specified `pad_size`. Can be overridden by the `do_pad` parameter in the `preprocess` method. pad_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`): Size of the output image after padding. Can be overridden by the `pad_size` parameter in the `preprocess` method. mask_pad_size (`dict`, *optional*, defaults to `{"height": 256, "width": 256}`): Size of the output segmentation map after padding. Can be overridden by the `mask_pad_size` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, mask_size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: bool = True, pad_size: int = None, mask_pad_size: int = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"longest_edge": 1024} size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size pad_size = pad_size if pad_size is not None else {"height": 1024, "width": 1024} pad_size = get_size_dict(pad_size, default_to_square=True) mask_size = mask_size if mask_size is not None else {"longest_edge": 256} mask_size = ( get_size_dict(max_size=mask_size, default_to_square=False) if not isinstance(mask_size, dict) else mask_size ) mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 256, "width": 256} mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True) self.do_resize = do_resize self.size = size self.mask_size = mask_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad self.pad_size = pad_size self.mask_pad_size = mask_pad_size self.do_convert_rgb = do_convert_rgb def pad_image( self, image: np.ndarray, pad_size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Pad an image to `(pad_size["height"], pad_size["width"])` with zeros to the right and bottom. Args: image (`np.ndarray`): Image to pad. pad_size (`Dict[str, int]`): Size of the output image after padding. data_format (`str` or `ChannelDimension`, *optional*): The data format of the image. Can be either "channels_first" or "channels_last". If `None`, the `data_format` of the `image` will be used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ output_height, output_width = pad_size["height"], pad_size["width"] input_height, input_width = get_image_size(image, channel_dim=input_data_format) pad_width = output_width - input_width pad_height = output_height - input_height padded_image = pad( image, ((0, pad_height), (0, pad_width)), data_format=data_format, input_data_format=input_data_format, **kwargs, ) return padded_image def _get_preprocess_shape(self, old_shape: Tuple[int, int], longest_edge: int): """ Compute the output size given input size and target long side length. """ oldh, oldw = old_shape scale = longest_edge * 1.0 / max(oldh, oldw) newh, neww = oldh * scale, oldw * scale newh = int(newh + 0.5) neww = int(neww + 0.5) return (newh, neww) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image so that its longest edge matches `size["longest_edge"]` while preserving the aspect ratio. 
Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest edge of the image will be resized to the specified size, while the other edge will be resized to maintain the aspect ratio. resample: `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "longest_edge" not in size: raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. Got {size.keys()}") input_size = get_image_size(image, channel_dim=input_data_format) output_height, output_width = self._get_preprocess_shape(input_size, size["longest_edge"]) return resize( image, size=(output_height, output_width), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def _preprocess( self, image: ImageInput, do_resize: bool, do_rescale: bool, do_normalize: bool, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, rescale_factor: Optional[float] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) reshaped_input_size = get_image_size(image, channel_dim=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) if do_pad: image = self.pad_image(image=image, pad_size=pad_size, input_data_format=input_data_format) return image, reshaped_input_size def _preprocess_image( self, image: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, do_convert_rgb: Optional[bool] = None, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]]: image = to_numpy_array(image) # PIL RGBA images are converted to RGB if do_convert_rgb: image = convert_to_rgb(image) # All transformations expect numpy arrays. 
image = to_numpy_array(image) if do_rescale and is_scaled_image(image): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) original_size = get_image_size(image, channel_dim=input_data_format) image, reshaped_input_size = self._preprocess( image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, pad_size=pad_size, input_data_format=input_data_format, ) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image, original_size, reshaped_input_size def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: Optional[bool] = None, mask_size: Dict[str, int] = None, do_pad: Optional[bool] = None, mask_pad_size: Optional[Dict[str, int]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] input_data_format = ChannelDimension.FIRST else: added_channel_dim = False if input_data_format is None: input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) original_size = get_image_size(segmentation_map, channel_dim=input_data_format) segmentation_map, _ = self._preprocess( image=segmentation_map, do_resize=do_resize, size=mask_size, resample=PILImageResampling.NEAREST, do_rescale=False, do_normalize=False, do_pad=do_pad, pad_size=mask_pad_size, input_data_format=input_data_format, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) segmentation_map = segmentation_map.astype(np.int64) return segmentation_map, original_size @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, mask_size: Optional[Dict[str, int]] = None, resample: Optional["PILImageResampling"] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, mask_pad_size: Optional[Dict[str, int]] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. segmentation_maps (`ImageInput`, *optional*): Segmentation map to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. 
size (`Dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. The longest edge of the image is resized to `size["longest_edge"]` whilst preserving the aspect ratio. mask_size (`Dict[str, int]`, *optional*, defaults to `self.mask_size`): Controls the size of the segmentation map after `resize`. The longest edge of the image is resized to `mask_size["longest_edge"]` whilst preserving the aspect ratio. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values by rescaling factor. rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to apply to the image pixel values. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. pad_size (`Dict[str, int]`, *optional*, defaults to `self.pad_size`): Controls the size of the padding applied to the image. The image is padded to `pad_size["height"]` and `pad_size["width"]` if `do_pad` is set to `True`. mask_pad_size (`Dict[str, int]`, *optional*, defaults to `self.mask_pad_size`): Controls the size of the padding applied to the segmentation map. The image is padded to `mask_pad_size["height"]` and `mask_pad_size["width"]` if `do_pad` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size mask_size = mask_size if mask_size is not None else self.mask_size mask_size = ( get_size_dict(max_size=mask_size, default_to_square=False) if not isinstance(mask_size, dict) else mask_size ) resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad pad_size = pad_size if pad_size is not None else self.pad_size pad_size = get_size_dict(pad_size, default_to_square=True) mask_pad_size = mask_pad_size if mask_pad_size is not None else self.mask_pad_size mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True) do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=pad_size, # Here _preprocess needs do_pad and pad_size. do_resize=do_resize, size=size, resample=resample, ) images, original_sizes, reshaped_input_sizes = zip( *( self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, pad_size=pad_size, do_convert_rgb=do_convert_rgb, data_format=data_format, input_data_format=input_data_format, ) for img in images ) ) data = { "pixel_values": images, "original_sizes": original_sizes, "reshaped_input_sizes": reshaped_input_sizes, } if segmentation_maps is not None: segmentation_maps, original_mask_sizes = zip( *( self._preprocess_mask( segmentation_map=mask, do_resize=do_resize, mask_size=mask_size, do_pad=do_pad, mask_pad_size=mask_pad_size, input_data_format=input_data_format, ) for mask in segmentation_maps ) ) # masks should start out the same size as input images assert all( original_im_size == original_mask_size for original_im_size, original_mask_size in zip(original_sizes, original_mask_sizes) ), "Segmentation maps should be the same size as input images." data["labels"] = segmentation_maps return BatchFeature(data=data, tensor_type=return_tensors) def post_process_masks( self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None, return_tensors="pt", ): """ Remove padding and upscale masks to the original image size. 
Args: masks (`Union[List[torch.Tensor], List[np.ndarray], List[tf.Tensor]]`): Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. original_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`): The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format. reshaped_input_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`): The size of each image as it is fed to the model, in (height, width) format. Used to remove padding. mask_threshold (`float`, *optional*, defaults to 0.0): The threshold to use for binarizing the masks. binarize (`bool`, *optional*, defaults to `True`): Whether to binarize the masks. pad_size (`int`, *optional*, defaults to `self.pad_size`): The target size the images were padded to before being passed to the model. If None, the target size is assumed to be the processor's `pad_size`. return_tensors (`str`, *optional*, defaults to `"pt"`): If `"pt"`, return PyTorch tensors. If `"tf"`, return TensorFlow tensors. Returns: (`Union[torch.Tensor, tf.Tensor]`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width) is given by original_size. """ if return_tensors == "pt": return self._post_process_masks_pt( masks=masks, original_sizes=original_sizes, reshaped_input_sizes=reshaped_input_sizes, mask_threshold=mask_threshold, binarize=binarize, pad_size=pad_size, ) elif return_tensors == "tf": return self._post_process_masks_tf( masks=masks, original_sizes=original_sizes, reshaped_input_sizes=reshaped_input_sizes, mask_threshold=mask_threshold, binarize=binarize, pad_size=pad_size, ) else: raise ValueError("return_tensors must be either 'pt' or 'tf'") def _post_process_masks_pt( self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None ): """ Remove padding and upscale masks to the original image size. Args: masks (`Union[List[torch.Tensor], List[np.ndarray]]`): Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`): The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format. reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`): The size of each image as it is fed to the model, in (height, width) format. Used to remove padding. mask_threshold (`float`, *optional*, defaults to 0.0): The threshold to use for binarizing the masks. binarize (`bool`, *optional*, defaults to `True`): Whether to binarize the masks. pad_size (`int`, *optional*, defaults to `self.pad_size`): The target size the images were padded to before being passed to the model. If None, the target size is assumed to be the processor's `pad_size`. Returns: (`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width) is given by original_size. 
""" requires_backends(self, ["torch"]) pad_size = self.pad_size if pad_size is None else pad_size target_image_size = (pad_size["height"], pad_size["width"]) if isinstance(original_sizes, (torch.Tensor, np.ndarray)): original_sizes = original_sizes.tolist() if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)): reshaped_input_sizes = reshaped_input_sizes.tolist() output_masks = [] for i, original_size in enumerate(original_sizes): if isinstance(masks[i], np.ndarray): masks[i] = torch.from_numpy(masks[i]) elif not isinstance(masks[i], torch.Tensor): raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`") interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False) interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]] interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False) if binarize: interpolated_mask = interpolated_mask > mask_threshold output_masks.append(interpolated_mask) return output_masks def _post_process_masks_tf( self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None ): """ Remove padding and upscale masks to the original image size. Args: masks (`tf.Tensor`): Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. original_sizes (`tf.Tensor`): The original size of the images before resizing for input to the model, in (height, width) format. reshaped_input_sizes (`tf.Tensor`): The size of the image input to the model, in (height, width) format. Used to remove padding. mask_threshold (`float`, *optional*, defaults to 0.0): The threshold to use for binarizing the masks. binarize (`bool`, *optional*, defaults to `True`): Whether to binarize the masks. pad_size (`int`, *optional*, defaults to `self.pad_size`): The target size the images were padded to before being passed to the model. If None, the target size is assumed to be the processor's `pad_size`. Returns: (`tf.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width) is given by original_size. """ requires_backends(self, ["tf"]) pad_size = self.pad_size if pad_size is None else pad_size target_image_size = (pad_size["height"], pad_size["width"]) output_masks = [] for i, original_size in enumerate(original_sizes): # tf.image expects NHWC, we transpose the NCHW inputs for it mask = tf.transpose(masks[i], perm=[0, 2, 3, 1]) interpolated_mask = tf.image.resize(mask, target_image_size, method="bilinear") interpolated_mask = interpolated_mask[:, : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1], :] interpolated_mask = tf.image.resize(interpolated_mask, original_size, method="bilinear") if binarize: interpolated_mask = interpolated_mask > mask_threshold # And then we transpose them back at the end output_masks.append(tf.transpose(interpolated_mask, perm=[0, 3, 1, 2])) return output_masks def post_process_for_mask_generation( self, all_masks, all_scores, all_boxes, crops_nms_thresh, return_tensors="pt" ): """ Post processes masks that are generated by calling the Non Maximum Suppression algorithm on the predicted masks. 
Args: all_masks (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all predicted segmentation masks all_scores (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all predicted iou scores all_boxes (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all bounding boxes of the predicted masks crops_nms_thresh (`float`): Threshold for NMS (Non Maximum Suppression) algorithm. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. """ if return_tensors == "pt": return _postprocess_for_mg(all_masks, all_scores, all_boxes, crops_nms_thresh) elif return_tensors == "tf": return _postprocess_for_mg_tf(all_masks, all_scores, all_boxes, crops_nms_thresh) def generate_crop_boxes( self, image, target_size, crop_n_layers: int = 0, overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[List[int]] = 1, device: Optional["torch.device"] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, return_tensors: str = "pt", ): """ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. Args: image (`np.array`): Input original image target_size (`int`): Target size of the resized image crop_n_layers (`int`, *optional*, defaults to 0): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. overlap_ratio (`float`, *optional*, defaults to 512/1500): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. points_per_crop (`int`, *optional*, defaults to 32): Number of points to sample from each crop. crop_n_points_downscale_factor (`List[int]`, *optional*, defaults to 1): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. device (`torch.device`, *optional*, defaults to None): Device to use for the computation. If None, cpu will be used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. 
""" crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes( image, target_size, crop_n_layers, overlap_ratio, points_per_crop, crop_n_points_downscale_factor, input_data_format, ) if return_tensors == "pt": if device is None: device = torch.device("cpu") crop_boxes = torch.tensor(crop_boxes, device=device) points_per_crop = torch.tensor(points_per_crop, device=device) # cropped_images stays as np input_labels = torch.tensor(input_labels, device=device) elif return_tensors == "tf": if device is not None: raise ValueError("device is not a supported argument when return_tensors is tf!") crop_boxes = tf.convert_to_tensor(crop_boxes) points_per_crop = tf.convert_to_tensor(points_per_crop) # cropped_images stays as np input_labels = tf.convert_to_tensor(input_labels) else: raise ValueError("return_tensors must be either 'pt' or 'tf'.") return crop_boxes, points_per_crop, cropped_images, input_labels def filter_masks( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, return_tensors="pt", ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding boxes and pad the predicted masks if necessary. Args: masks (`Union[torch.Tensor, tf.Tensor]`): Input masks. iou_scores (`Union[torch.Tensor, tf.Tensor]`): List of IoU scores. original_size (`Tuple[int,int]`): Size of the orginal image. cropped_box_image (`np.array`): The cropped image. pred_iou_thresh (`float`, *optional*, defaults to 0.88): The threshold for the iou scores. stability_score_thresh (`float`, *optional*, defaults to 0.95): The threshold for the stability score. mask_threshold (`float`, *optional*, defaults to 0): The threshold for the predicted masks. stability_score_offset (`float`, *optional*, defaults to 1): The offset for the stability score used in the `_compute_stability_score` method. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. """ if return_tensors == "pt": return self._filter_masks_pt( masks=masks, iou_scores=iou_scores, original_size=original_size, cropped_box_image=cropped_box_image, pred_iou_thresh=pred_iou_thresh, stability_score_thresh=stability_score_thresh, mask_threshold=mask_threshold, stability_score_offset=stability_score_offset, ) elif return_tensors == "tf": return self._filter_masks_tf( masks=masks, iou_scores=iou_scores, original_size=original_size, cropped_box_image=cropped_box_image, pred_iou_thresh=pred_iou_thresh, stability_score_thresh=stability_score_thresh, mask_threshold=mask_threshold, stability_score_offset=stability_score_offset, ) def _filter_masks_pt( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding boxes and pad the predicted masks if necessary. Args: masks (`torch.Tensor`): Input masks. 
iou_scores (`torch.Tensor`): List of IoU scores. original_size (`Tuple[int,int]`): Size of the orginal image. cropped_box_image (`np.array`): The cropped image. pred_iou_thresh (`float`, *optional*, defaults to 0.88): The threshold for the iou scores. stability_score_thresh (`float`, *optional*, defaults to 0.95): The threshold for the stability score. mask_threshold (`float`, *optional*, defaults to 0): The threshold for the predicted masks. stability_score_offset (`float`, *optional*, defaults to 1): The offset for the stability score used in the `_compute_stability_score` method. """ requires_backends(self, ["torch"]) original_height, original_width = original_size iou_scores = iou_scores.flatten(0, 1) masks = masks.flatten(0, 1) if masks.shape[0] != iou_scores.shape[0]: raise ValueError("masks and iou_scores must have the same batch size.") if masks.device != iou_scores.device: iou_scores = iou_scores.to(masks.device) batch_size = masks.shape[0] keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device) if pred_iou_thresh > 0.0: keep_mask = keep_mask & (iou_scores > pred_iou_thresh) # compute stability score if stability_score_thresh > 0.0: stability_scores = _compute_stability_score_pt(masks, mask_threshold, stability_score_offset) keep_mask = keep_mask & (stability_scores > stability_score_thresh) scores = iou_scores[keep_mask] masks = masks[keep_mask] # binarize masks masks = masks > mask_threshold converted_boxes = _batched_mask_to_box(masks) keep_mask = ~_is_box_near_crop_edge( converted_boxes, cropped_box_image, [0, 0, original_width, original_height] ) scores = scores[keep_mask] masks = masks[keep_mask] converted_boxes = converted_boxes[keep_mask] masks = _pad_masks(masks, cropped_box_image, original_height, original_width) # conversion to rle is necessary to run non-maximum suppresion masks = _mask_to_rle_pytorch(masks) return masks, scores, converted_boxes def _filter_masks_tf( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding boxes and pad the predicted masks if necessary. Args: masks (`tf.Tensor`): Input masks. iou_scores (`tf.Tensor`): List of IoU scores. original_size (`Tuple[int,int]`): Size of the orginal image. cropped_box_image (`np.array`): The cropped image. pred_iou_thresh (`float`, *optional*, defaults to 0.88): The threshold for the iou scores. stability_score_thresh (`float`, *optional*, defaults to 0.95): The threshold for the stability score. mask_threshold (`float`, *optional*, defaults to 0): The threshold for the predicted masks. stability_score_offset (`float`, *optional*, defaults to 1): The offset for the stability score used in the `_compute_stability_score` method. 
""" requires_backends(self, ["tf"]) original_height, original_width = original_size iou_scores = tf.reshape(iou_scores, [iou_scores.shape[0] * iou_scores.shape[1], iou_scores.shape[2:]]) masks = tf.reshape(masks, [masks.shape[0] * masks.shape[1], masks.shape[2:]]) if masks.shape[0] != iou_scores.shape[0]: raise ValueError("masks and iou_scores must have the same batch size.") batch_size = masks.shape[0] keep_mask = tf.ones(batch_size, dtype=tf.bool) if pred_iou_thresh > 0.0: keep_mask = keep_mask & (iou_scores > pred_iou_thresh) # compute stability score if stability_score_thresh > 0.0: stability_scores = _compute_stability_score_tf(masks, mask_threshold, stability_score_offset) keep_mask = keep_mask & (stability_scores > stability_score_thresh) scores = iou_scores[keep_mask] masks = masks[keep_mask] # binarize masks masks = masks > mask_threshold converted_boxes = _batched_mask_to_box_tf(masks) keep_mask = ~_is_box_near_crop_edge_tf( converted_boxes, cropped_box_image, [0, 0, original_width, original_height] ) scores = scores[keep_mask] masks = masks[keep_mask] converted_boxes = converted_boxes[keep_mask] masks = _pad_masks_tf(masks, cropped_box_image, original_height, original_width) # conversion to rle is necessary to run non-maximum suppresion masks = _mask_to_rle_tf(masks) return masks, scores, converted_boxes
class_definition
1,853
48,363
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/image_processing_sam.py
null
4,162
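A standalone sketch of the mask upscaling performed by `_post_process_masks_pt`: low-resolution masks go up to the padded model resolution, the padding is cropped away, and the result is resized to the original image. The sizes below are illustrative:

```python
import torch
import torch.nn.functional as F

low_res = torch.randn(1, 3, 256, 256)   # (batch, num_masks, 256, 256) from the mask decoder
reshaped_input_size = (768, 1024)       # image size actually fed to the model
original_size = (480, 640)              # image size before preprocessing

up = F.interpolate(low_res, (1024, 1024), mode="bilinear", align_corners=False)
up = up[..., : reshaped_input_size[0], : reshaped_input_size[1]]  # crop bottom/right padding
masks = F.interpolate(up, original_size, mode="bilinear", align_corners=False)
binary = masks > 0.0                    # binarize at mask_threshold=0.0
print(binary.shape)                     # torch.Size([1, 3, 480, 640])
```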
class TFSamVisionEncoderOutput(ModelOutput): """ Base class for SAM vision model's outputs that also contains image embeddings obtained by applying the projection layer to the pooler_output. Args: image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ image_embeds: tf.Tensor | None = None last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None
class_definition
1,610
3,338
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,163
class TFSamImageSegmentationOutput(ModelOutput):
    """
    Base class for the Segment-Anything model's output.

    Args:
        iou_scores (`tf.Tensor` of shape `(batch_size, num_masks)`):
            The IoU scores of the predicted masks.
        pred_masks (`tf.Tensor` of shape `(batch_size, num_masks, height, width)`):
            The predicted low-resolution masks, which need to be post-processed by the processor.
        vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one
            for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
            the vision model at the output of each layer plus the optional initial embedding outputs.
        vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average
            in the self-attention heads.
        mask_decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average
            in the self-attention heads.
    """

    iou_scores: tf.Tensor = None
    pred_masks: tf.Tensor = None
    vision_hidden_states: Tuple[tf.Tensor, ...] | None = None
    vision_attentions: Tuple[tf.Tensor, ...] | None = None
    mask_decoder_attentions: Tuple[tf.Tensor, ...] | None = None
class_definition
3,352
5,377
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,164
class TFSamPatchEmbeddings(keras.layers.Layer): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config, **kwargs): super().__init__(**kwargs) image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = keras.layers.Conv2D( hidden_size, kernel_size=patch_size, strides=patch_size, name="projection" ) def call(self, pixel_values): batch_size, num_channels, height, width = shape_list(pixel_values) if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(tf.transpose(pixel_values, perm=[0, 2, 3, 1])) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels])
class_definition
5,380
7,478
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,165
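Worked numbers for the patch arithmetic in TFSamPatchEmbeddings above, assuming the SAM ViT defaults (image_size=1024, patch_size=16); the values are illustrative only:

image_size, patch_size = 1024, 16
grid = image_size // patch_size  # 64 patches per side
num_patches = grid * grid        # 4096 patch embeddings per image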
class TFSamMLPBlock(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.lin1 = keras.layers.Dense(config.mlp_dim, name="lin1") self.lin2 = keras.layers.Dense(config.hidden_size, name="lin2") self.act = ACT2FN[config.hidden_act] self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.lin1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.lin2(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lin1", None) is not None: with tf.name_scope(self.lin1.name): self.lin1.build([None, None, self.config.hidden_size]) if getattr(self, "lin2", None) is not None: with tf.name_scope(self.lin2.name): self.lin2.build([None, None, self.config.mlp_dim])
class_definition
7,481
8,493
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,166
class TFSamLayerNorm(keras.layers.Layer): r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last", **kwargs): super().__init__(**kwargs) self.eps = eps self.data_format = data_format self.normalized_shape = normalized_shape if self.data_format not in ["channels_last", "channels_first"]: raise NotImplementedError(f"Unsupported data format: {self.data_format}") def build(self, input_shape): self.weight = self.add_weight(shape=self.normalized_shape, initializer="ones", name="weight") self.bias = self.add_weight(shape=self.normalized_shape, initializer="zeros", name="bias") super().build(input_shape) def call(self, x: tf.Tensor) -> tf.Tensor: if self.data_format == "channels_last": x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=-1) elif self.data_format == "channels_first": x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=1) return x
class_definition
8,496
9,906
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,167
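A minimal sketch of what the channels_first branch of TFSamLayerNorm computes, assuming `functional_layernorm` normalizes over the given axis; the helper name below is illustrative, not the library's:

import tensorflow as tf

def layernorm_channels_first(x: tf.Tensor, weight: tf.Tensor, bias: tf.Tensor, eps: float = 1e-6) -> tf.Tensor:
    # x has shape (batch_size, channels, height, width); normalize over the channel axis
    mean = tf.reduce_mean(x, axis=1, keepdims=True)
    variance = tf.reduce_mean(tf.square(x - mean), axis=1, keepdims=True)
    x = (x - mean) / tf.sqrt(variance + eps)
    return x * weight[None, :, None, None] + bias[None, :, None, None]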
class TFSamAttention(keras.layers.Layer): """ SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. """ def __init__(self, config, downsample_rate=None, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate self.internal_dim = config.hidden_size // downsample_rate self.num_attention_heads = config.num_attention_heads if self.internal_dim % config.num_attention_heads != 0: raise ValueError("num_attention_heads must divide hidden_size.") self.q_proj = keras.layers.Dense(self.internal_dim, name="q_proj") self.k_proj = keras.layers.Dense(self.internal_dim, name="k_proj") self.v_proj = keras.layers.Dense(self.internal_dim, name="v_proj") self.out_proj = keras.layers.Dense(self.hidden_size, name="out_proj") def _separate_heads(self, hidden_states: tf.Tensor, num_attention_heads: int) -> tf.Tensor: batch, point_batch_size, n_tokens, channel = shape_list(hidden_states) c_per_head = channel // num_attention_heads hidden_states = tf.reshape( hidden_states, (batch * point_batch_size, n_tokens, num_attention_heads, c_per_head) ) return tf.transpose(hidden_states, perm=[0, 2, 1, 3]) def _recombine_heads(self, hidden_states: tf.Tensor, point_batch_size: int) -> tf.Tensor: batch, n_heads, n_tokens, c_per_head = shape_list(hidden_states) hidden_states = tf.transpose(hidden_states, perm=[0, 2, 1, 3]) return tf.reshape( hidden_states, (batch // tf.reduce_max([1, point_batch_size]), point_batch_size, n_tokens, n_heads * c_per_head), ) def call(self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor) -> tf.Tensor: # Input projections query = self.q_proj(query) key = self.k_proj(key) value = self.v_proj(value) point_batch_size = shape_list(query)[1] # Separate into heads query = self._separate_heads(query, self.num_attention_heads) key = self._separate_heads(key, self.num_attention_heads) value = self._separate_heads(value, self.num_attention_heads) # SamAttention _, _, _, c_per_head = shape_list(query) attn = tf.matmul( query, tf.transpose(key, perm=[0, 1, 3, 2]) ) # batch_size * point_batch_size x N_heads x N_tokens x N_tokens attn = attn / tf.math.sqrt(float(c_per_head)) attn = tf.nn.softmax(attn, axis=-1) # Get output out = tf.matmul(attn, value) out = self._recombine_heads(out, point_batch_size) out = self.out_proj(out) return out def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.hidden_size]) if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.hidden_size]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.internal_dim])
class_definition
9,909
13,566
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,168
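A shape walk-through of `_separate_heads` in TFSamAttention, with illustrative values (hidden_size=256 and downsample_rate=2 give internal_dim=128; num_attention_heads=8 gives c_per_head=16):

import tensorflow as tf

batch, point_batch, n_tokens, internal_dim, num_heads = 2, 3, 5, 128, 8
x = tf.random.normal((batch, point_batch, n_tokens, internal_dim))
x = tf.reshape(x, (batch * point_batch, n_tokens, num_heads, internal_dim // num_heads))
x = tf.transpose(x, perm=[0, 2, 1, 3])  # (6, 8, 5, 16): heads act as a batch-like axis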
class TFSamTwoWayAttentionBlock(keras.layers.Layer):
    def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False, **kwargs):
        """
        A transformer block with four layers:
            (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block
            on sparse inputs (4) cross attention of dense inputs -> sparse inputs

        Args:
            config (`SamMaskDecoderConfig`):
                The configuration used to instantiate the block.
            attention_downsample_rate (`int`, *optional*, defaults to 2):
                The downsample rate used to reduce the inner dimension of the attention.
            skip_first_layer_pe (`bool`, *optional*, defaults to `False`):
                Whether or not to skip the addition of the query_point_embedding on the first layer.
        """
        super().__init__(**kwargs)
        self.hidden_size = config.hidden_size
        self.layer_norm_eps = config.layer_norm_eps

        self.self_attn = TFSamAttention(config, downsample_rate=1, name="self_attn")
        self.layer_norm1 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm1")

        self.cross_attn_token_to_image = TFSamAttention(
            config, downsample_rate=attention_downsample_rate, name="cross_attn_token_to_image"
        )
        self.layer_norm2 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm2")

        self.mlp = TFSamMLPBlock(config, name="mlp")
        self.layer_norm3 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm3")

        self.layer_norm4 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm4")
        self.cross_attn_image_to_token = TFSamAttention(
            config, downsample_rate=attention_downsample_rate, name="cross_attn_image_to_token"
        )

        self.skip_first_layer_pe = skip_first_layer_pe

    def call(
        self,
        queries: tf.Tensor,
        keys: tf.Tensor,
        query_point_embedding: tf.Tensor,
        key_point_embedding: tf.Tensor,
        output_attentions: bool = False,
    ):
        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(query=queries, key=queries, value=queries)
        else:
            query = queries + query_point_embedding
            attn_out = self.self_attn(query=query, key=query, value=queries)
            queries = queries + attn_out
        queries = self.layer_norm1(queries)

        # Cross attention block, tokens attending to image embedding
        query = queries + query_point_embedding
        key = keys + key_point_embedding

        attn_out = self.cross_attn_token_to_image(query=query, key=key, value=keys)
        queries = queries + attn_out

        queries = self.layer_norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.layer_norm3(queries)

        # Cross attention block, image embedding attending to tokens
        query = queries + query_point_embedding
        key = keys + key_point_embedding

        attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries)
        keys = keys + attn_out

        keys = self.layer_norm4(keys)

        outputs = (queries, keys)

        if output_attentions:
            outputs = outputs + (attn_out,)
        else:
            outputs = outputs + (None,)

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self_attn", None) is not None:
            with tf.name_scope(self.self_attn.name):
                self.self_attn.build(None)
        if getattr(self, "layer_norm1", None) is not None:
            with tf.name_scope(self.layer_norm1.name):
                self.layer_norm1.build([None, None, None, self.hidden_size])
        if getattr(self, "cross_attn_token_to_image", None) is not None:
            with tf.name_scope(self.cross_attn_token_to_image.name):
                self.cross_attn_token_to_image.build(None)
        if getattr(self, "layer_norm2", None) is not None:
            with tf.name_scope(self.layer_norm2.name):
                self.layer_norm2.build([None, None, None, self.hidden_size])
        if getattr(self, "mlp", None) is not None:
            with tf.name_scope(self.mlp.name):
                self.mlp.build(None)
        if getattr(self, "layer_norm3", None) is not None:
            with tf.name_scope(self.layer_norm3.name):
                self.layer_norm3.build([None, None, None, self.hidden_size])
        if getattr(self, "layer_norm4", None) is not None:
            with tf.name_scope(self.layer_norm4.name):
                self.layer_norm4.build([None, None, None, self.hidden_size])
        if getattr(self, "cross_attn_image_to_token", None) is not None:
            with tf.name_scope(self.cross_attn_image_to_token.name):
                self.cross_attn_image_to_token.build(None)
class_definition
13,569
18,687
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,169
class TFSamTwoWayTransformer(keras.layers.Layer):
    def __init__(self, config: SamMaskDecoderConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config

        self.num_hidden_layers = config.num_hidden_layers
        self.layers = []

        for i in range(self.num_hidden_layers):
            self.layers.append(TFSamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0), name=f"layers_._{i}"))

        self.final_attn_token_to_image = TFSamAttention(config, name="final_attn_token_to_image")
        self.layer_norm_final_attn = keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="layer_norm_final_attn"
        )

    def call(
        self,
        point_embeddings: tf.Tensor,
        image_embeddings: tf.Tensor,
        image_positional_embeddings: tf.Tensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TFBaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        all_attentions = ()

        if image_embeddings is None:
            raise ValueError("You have to specify an image_embedding")

        image_embeddings = tf.transpose(flatten(image_embeddings, 2), perm=(0, 2, 1))[:, None]
        image_positional_embeddings = tf.transpose(flatten(image_positional_embeddings, 2), (0, 2, 1))[:, None]

        # Prepare queries
        queries = point_embeddings
        keys = image_embeddings

        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            queries, keys, attention_outputs = layer(
                queries=queries,
                keys=keys,
                query_point_embedding=point_embeddings,
                key_point_embedding=image_positional_embeddings,
                output_attentions=output_attentions,
            )

            if output_attentions:
                all_attentions = all_attentions + (attention_outputs,)

        # Apply the final attention layer from the points to the image
        query = queries + point_embeddings
        key = keys + image_positional_embeddings

        attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys)

        queries = queries + attn_out
        queries = self.layer_norm_final_attn(queries)
        return queries, keys, all_attentions

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "final_attn_token_to_image", None) is not None:
            with tf.name_scope(self.final_attn_token_to_image.name):
                self.final_attn_token_to_image.build(None)
        if getattr(self, "layer_norm_final_attn", None) is not None:
            with tf.name_scope(self.layer_norm_final_attn.name):
                self.layer_norm_final_attn.build([None, None, None, self.config.hidden_size])
        for layer in self.layers:
            with tf.name_scope(layer.name):
                layer.build(None)
class_definition
18,690
22,039
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,170
class TFSamFeedForward(keras.layers.Layer): def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False, **kwargs ): super().__init__(**kwargs) self.num_layers = num_layers self.activation = keras.layers.ReLU() self.proj_in = keras.layers.Dense(hidden_dim, input_shape=(input_dim,), name="proj_in") self.proj_out = keras.layers.Dense(output_dim, input_shape=(hidden_dim,), name="proj_out") self.layers = [ keras.layers.Dense(hidden_dim, input_shape=(hidden_dim,), name=f"layers_._{i}") for i in range(num_layers - 2) ] self.sigmoid_output = sigmoid_output self.hidden_dim = hidden_dim self.input_dim = input_dim def call(self, hidden_states): hidden_states = self.proj_in(hidden_states) hidden_states = self.activation(hidden_states) for layer in self.layers: hidden_states = self.activation(layer(hidden_states)) hidden_states = self.proj_out(hidden_states) if self.sigmoid_output: hidden_states = tf.sigmoid(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "proj_in", None) is not None: with tf.name_scope(self.proj_in.name): self.proj_in.build([None, None, self.input_dim]) if getattr(self, "proj_out", None) is not None: with tf.name_scope(self.proj_out.name): self.proj_out.build([None, None, self.hidden_dim]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build([None, None, self.hidden_dim])
class_definition
22,042
23,898
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,171
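For reference, the mask decoder below instantiates this feed-forward block once per mask token with these dimensions (256 is the config's `hidden_size` default, assumed here); with `num_layers=3` there is exactly one hidden Dense layer between `proj_in` and `proj_out`:

mlp = TFSamFeedForward(input_dim=256, hidden_dim=256, output_dim=256 // 8, num_layers=3)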
class TFSamMaskDecoder(keras.layers.Layer): def __init__(self, config: SamMaskDecoderConfig, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size self.num_multimask_outputs = config.num_multimask_outputs self.num_mask_tokens = config.num_multimask_outputs + 1 self.transformer = TFSamTwoWayTransformer(config, name="transformer") self.upscale_conv1 = keras.layers.Conv2DTranspose( self.hidden_size // 4, kernel_size=2, strides=2, name="upscale_conv1", data_format="channels_first" ) self.upscale_conv2 = keras.layers.Conv2DTranspose( self.hidden_size // 8, kernel_size=2, strides=2, name="upscale_conv2", data_format="channels_first" ) self.upscale_layer_norm = TFSamLayerNorm( self.hidden_size // 4, data_format="channels_first", name="upscale_layer_norm" ) self.activation = tf.nn.gelu mlps_list = [] for i in range(self.num_mask_tokens): mlps_list += [ TFSamFeedForward( self.hidden_size, self.hidden_size, self.hidden_size // 8, 3, name=f"output_hypernetworks_mlps_._{i}", ) ] self.output_hypernetworks_mlps = mlps_list self.iou_prediction_head = TFSamFeedForward( self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth, name="iou_prediction_head", ) def build(self, input_shape=None): if self.built: return self.built = True self.iou_token = self.add_weight(shape=(1, self.hidden_size), name="iou_token.weight", trainable=True) self.mask_tokens = self.add_weight( shape=(self.num_mask_tokens, self.hidden_size), name="mask_tokens.weight", trainable=True ) if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "upscale_conv1", None) is not None: with tf.name_scope(self.upscale_conv1.name): self.upscale_conv1.build([None, self.hidden_size, None, None]) if getattr(self, "upscale_conv2", None) is not None: with tf.name_scope(self.upscale_conv2.name): self.upscale_conv2.build([None, self.hidden_size // 4, None, None]) if getattr(self, "upscale_layer_norm", None) is not None: with tf.name_scope(self.upscale_layer_norm.name): self.upscale_layer_norm.build(None) if getattr(self, "iou_prediction_head", None) is not None: with tf.name_scope(self.iou_prediction_head.name): self.iou_prediction_head.build(None) for mlp in self.output_hypernetworks_mlps: with tf.name_scope(mlp.name): mlp.build(None) def call( self, image_embeddings: tf.Tensor, image_positional_embeddings: tf.Tensor, sparse_prompt_embeddings: tf.Tensor, dense_prompt_embeddings: tf.Tensor, multimask_output: bool, output_attentions: Optional[bool] = None, ) -> Tuple[tf.Tensor, tf.Tensor]: batch_size, num_channels, height, width = shape_list(image_embeddings) point_batch_size = tf.math.maximum(1, tf.shape(sparse_prompt_embeddings)[1]) output_tokens = tf.concat([self.iou_token, self.mask_tokens], axis=0) # Should be (1, 32) + (4, 32) = (5, 32) output_tokens = tf.tile( output_tokens[None, None, :], [batch_size, point_batch_size, 1, 1] ) # Should be (batch_size, point_size, 5, 32) # Matt: The original Torch code checked that the sum of sparse_prompt_embeddings equalled 0. However, this only # happens when the sparse prompt embeddings are an empty tensor with shape[1] == 0. I replaced # it with an explicit shape check to avoid data-dependent control flow which breaks XLA. 
if shape_list(sparse_prompt_embeddings)[1] != 0: tokens = tf.concat((output_tokens, sparse_prompt_embeddings), axis=2) else: tokens = output_tokens point_embeddings = tf.cast(tokens, self.iou_token.dtype) image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = tf.repeat(image_embeddings, point_batch_size, axis=0) image_positional_embeddings = tf.repeat(image_positional_embeddings, point_batch_size, axis=0) point_embedding, image_embeddings, attentions = self.transformer( point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, output_attentions=output_attentions, ) iou_token_out = point_embedding[:, :, 0, :] mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :] image_embeddings = tf.transpose(image_embeddings, perm=(0, 1, 3, 2)) image_embeddings = tf.reshape(image_embeddings, [batch_size * point_batch_size, num_channels, height, width]) upscaled_embedding = self.upscale_conv1(image_embeddings) upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) hyper_in_list = [] for i in range(self.num_mask_tokens): current_mlp = self.output_hypernetworks_mlps[i] hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] hyper_in = tf.stack(hyper_in_list, axis=2) _, num_channels, height, width = shape_list(upscaled_embedding) upscaled_embedding = tf.reshape( upscaled_embedding, [batch_size, point_batch_size, num_channels, height * width] ) masks = tf.reshape(hyper_in @ upscaled_embedding, [batch_size, point_batch_size, -1, height, width]) iou_pred = self.iou_prediction_head(iou_token_out) if multimask_output: mask_slice = slice(1, None) else: mask_slice = slice(0, 1) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] outputs = (masks, iou_pred) if output_attentions: outputs = outputs + (attentions,) else: outputs = outputs + (None,) return outputs
class_definition
23,901
30,466
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,172
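The multimask logic of TFSamMaskDecoder in numbers (illustrative values matching the config defaults): num_multimask_outputs=3 means four mask tokens are learned, and `multimask_output` decides whether the three "multi" masks or the single fallback mask are returned:

num_multimask_outputs = 3
num_mask_tokens = num_multimask_outputs + 1                       # 4 tokens in total
multimask_output = True
mask_slice = slice(1, None) if multimask_output else slice(0, 1)  # 3 masks vs. 1 mask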
class TFSamPositionalEmbedding(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.scale = config.hidden_size // 2 self.config = config def build(self, input_shape): # TODO Matt: What is going on here? Why is a non-trainable weight randomly initialized? self.positional_embedding = self.add_weight( name="positional_embedding", shape=(2, self.config.num_pos_feats), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=self.scale), trainable=False, ) super().build(input_shape) def call(self, input_coords, input_shape=None): """Positionally encode points that are normalized to [0,1].""" coordinates = tf.identity(input_coords) if input_shape is not None: coordinates = tf.stack( [ tf.cast(coordinates[:, :, :, 0], tf.float32) / input_shape[1], tf.cast(coordinates[:, :, :, 1], tf.float32) / input_shape[0], ], axis=-1, ) # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape coordinates = 2 * coordinates - 1 coordinates = tf.cast(coordinates, self.positional_embedding.dtype) coordinates = tf.matmul(coordinates, self.positional_embedding) coordinates = 2 * np.pi * coordinates # outputs d_1 x ... x d_n x channel shape return tf.concat([tf.sin(coordinates), tf.cos(coordinates)], axis=-1)
class_definition
30,469
32,042
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,173
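A NumPy sketch of the random-Fourier encoding performed in TFSamPositionalEmbedding's `call`, assuming num_pos_feats=128 and hidden_size=256 (so the init stddev, `self.scale`, is 128); the variable names are illustrative:

import numpy as np

rng = np.random.default_rng(0)
positional_embedding = rng.normal(scale=128.0, size=(2, 128))  # frozen Gaussian projection
coords = np.array([[0.25, 0.75]])                              # one normalized (x, y) point
coords = 2 * coords - 1                                        # map [0, 1] -> [-1, 1]
coords = 2 * np.pi * (coords @ positional_embedding)
encoding = np.concatenate([np.sin(coords), np.cos(coords)], axis=-1)  # shape (1, 256)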
class TFSamMaskEmbedding(keras.layers.Layer): def __init__(self, config: SamPromptEncoderConfig, **kwargs): super().__init__(**kwargs) self.mask_input_channels = config.mask_input_channels // 4 self.activation = ACT2FN[config.hidden_act] self.conv1 = keras.layers.Conv2D(self.mask_input_channels, kernel_size=2, strides=2, name="conv1") self.conv2 = keras.layers.Conv2D(config.mask_input_channels, kernel_size=2, strides=2, name="conv2") self.conv3 = keras.layers.Conv2D(config.hidden_size, kernel_size=1, name="conv3") self.layer_norm1 = TFSamLayerNorm(self.mask_input_channels, config.layer_norm_eps, name="layer_norm1") self.layer_norm2 = TFSamLayerNorm(self.mask_input_channels * 4, config.layer_norm_eps, name="layer_norm2") self.config = config def call(self, masks): masks = tf.transpose(masks, perm=(0, 2, 3, 1)) # Convert to channels-last hidden_states = self.conv1(masks) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = self.activation(hidden_states) dense_embeddings = self.conv3(hidden_states) dense_embeddings = tf.transpose(dense_embeddings, perm=(0, 3, 1, 2)) # Convert back to channels-first return dense_embeddings def build(self, input_shape=None): # This class needs an explicit build method because it isn't called with the standard dummy inputs if self.built: return self.built = True with tf.name_scope("conv1"): self.conv1.build([None, None, None, 1]) with tf.name_scope("conv2"): self.conv2.build([None, None, None, self.mask_input_channels]) with tf.name_scope("conv3"): self.conv3.build([None, None, None, self.mask_input_channels * 4]) with tf.name_scope("layer_norm1"): self.layer_norm1.build([None, None, None, self.mask_input_channels]) with tf.name_scope("layer_norm2"): self.layer_norm2.build([None, None, None, self.mask_input_channels * 4])
class_definition
32,045
34,277
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,174
class TFSamPromptEncoder(keras.layers.Layer): def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding, **kwargs): super().__init__(**kwargs) self.shared_embedding = shared_patch_embedding self.mask_embed = TFSamMaskEmbedding(config, name="mask_embed") self.no_mask_embed = None self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size) self.input_image_size = config.image_size self.point_embed = [] self.hidden_size = config.hidden_size self.not_a_point_embed = None self.config = config def build(self, input_shape=None): self.no_mask_embed = self.add_weight( name="no_mask_embed.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) self.point_embed = [ self.add_weight( name=f"point_embed_._{i}.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) for i in range(self.config.num_point_embeddings) ] self.not_a_point_embed = self.add_weight( name="not_a_point_embed.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) with tf.name_scope("mask_embed"): # We must explicitly build the mask embed because it isn't touched by the standard dummy inputs self.mask_embed.build( (None, self.config.mask_input_channels, self.config.image_size, self.config.image_size) ) if self.built: return self.built = True if getattr(self, "mask_embed", None) is not None: with tf.name_scope(self.mask_embed.name): self.mask_embed.build(None) def _embed_points(self, points: tf.Tensor, labels: tf.Tensor, pad: bool) -> tf.Tensor: """Embeds point prompts.""" points = points + 0.5 # Shift to center of pixel if pad: target_point_shape = (shape_list(points)[0], shape_list(points)[1], 1, shape_list(points)[-1]) target_labels_shape = (shape_list(points)[0], shape_list(points)[1], 1) padding_point = tf.zeros(target_point_shape, dtype=points.dtype) padding_label = -tf.ones(target_labels_shape, dtype=labels.dtype) points = tf.concat([points, padding_point], axis=2) labels = tf.concat([labels, padding_label], axis=2) input_shape = (self.input_image_size, self.input_image_size) point_embedding = self.shared_embedding(points, input_shape) point_embedding = tf.where(labels[..., None] == -1, self.not_a_point_embed[0], point_embedding) point_embedding = tf.where( labels[..., None] != -10, point_embedding, tf.zeros_like(point_embedding), ) point_embedding = tf.where( (labels == 0)[:, :, :, None], point_embedding + self.point_embed[0], point_embedding ) point_embedding = tf.where( (labels == 1)[:, :, :, None], point_embedding + self.point_embed[1], point_embedding ) return point_embedding def _embed_boxes(self, boxes: tf.Tensor) -> tf.Tensor: """Embeds box prompts.""" boxes = boxes + 0.5 # Shift to center of pixel batch_size, nb_boxes = shape_list(boxes)[:2] coords = tf.reshape(boxes, (batch_size, nb_boxes, 2, 2)) input_shape = (self.input_image_size, self.input_image_size) corner_embedding = self.shared_embedding(coords, input_shape) corner_embedding += tf.where( tf.range(shape_list(corner_embedding)[2])[None, None, :, None] == 0, self.point_embed[2][0], self.point_embed[3][0], ) return corner_embedding def call( self, batch_size: Optional[int], input_points: Optional[Tuple[tf.Tensor, tf.Tensor]], input_labels: tf.Tensor | None, input_boxes: tf.Tensor | None, input_masks: tf.Tensor | None, ) -> Tuple[tf.Tensor, tf.Tensor]: """ Embeds different types of prompts, returning both sparse and dense 
embeddings. Args: points (`tf.Tensor`, *optional*): point coordinates and labels to embed. boxes (`tf.Tensor`, *optional*): boxes to embed masks (`tf.Tensor`, *optional*): masks to embed """ sparse_embeddings = None if input_points is not None: batch_size, point_batch_size = shape_list(input_points)[:2] if input_labels is None: raise ValueError("If points are provided, labels must also be provided.") point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None)) sparse_embeddings = tf.zeros( (batch_size, point_batch_size, 0, self.hidden_size), dtype=point_embeddings.dtype ) sparse_embeddings = tf.concat([sparse_embeddings, point_embeddings], axis=2) if input_boxes is not None: batch_size = shape_list(input_boxes)[0] box_embeddings = self._embed_boxes(input_boxes) if sparse_embeddings is None: sparse_embeddings = box_embeddings else: sparse_embeddings = tf.concat([sparse_embeddings, box_embeddings], axis=2) if input_masks is not None: dense_embeddings = self.mask_embed(input_masks) else: dense_embeddings = self.no_mask_embed[0] dense_embeddings = tf.reshape(dense_embeddings, (1, -1, 1, 1)) dense_embeddings = tf.tile( dense_embeddings, (batch_size, 1, self.image_embedding_size[0], self.image_embedding_size[1]) ) if sparse_embeddings is None: sparse_embeddings = tf.zeros((batch_size, 0, 1, self.hidden_size), dtype=dense_embeddings.dtype) return sparse_embeddings, dense_embeddings
class_definition
34,280
40,585
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,175
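The label conventions consumed by `_embed_points` above, as read from its tf.where branches: 1 marks a foreground point, 0 a background point, -1 a padding point (embedded with `not_a_point_embed`), and -10 a point whose embedding is zeroed out entirely. An illustrative input pair:

import tensorflow as tf

input_points = tf.constant([[[[450.0, 600.0], [0.0, 0.0]]]])  # (batch, point_batch, n_points, 2)
input_labels = tf.constant([[[1, -1]]])                       # second point is padding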
class TFSamVisionAttention(keras.layers.Layer): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config, window_size, **kwargs): super().__init__(**kwargs) input_size = ( (config.image_size // config.patch_size, config.image_size // config.patch_size) if window_size == 0 else (window_size, window_size) ) self.input_size = input_size self.num_attention_heads = config.num_attention_heads head_dim = config.hidden_size // config.num_attention_heads self.head_dim = head_dim self.scale = head_dim**-0.5 self.dropout = config.attention_dropout self.qkv = keras.layers.Dense(config.hidden_size * 3, use_bias=config.qkv_bias, name="qkv") self.proj = keras.layers.Dense(config.hidden_size, name="proj") self.use_rel_pos = config.use_rel_pos if self.use_rel_pos: if input_size is None: raise ValueError("Input size must be provided if using relative positional encoding.") self.config = config def build(self, input_shape=None): if self.input_size is not None: # initialize relative positional embeddings self.rel_pos_h = self.add_weight( shape=(2 * self.input_size[0] - 1, self.head_dim), initializer="zeros", name="rel_pos_h" ) self.rel_pos_w = self.add_weight( shape=(2 * self.input_size[1] - 1, self.head_dim), initializer="zeros", name="rel_pos_w" ) if self.built: return self.built = True if getattr(self, "qkv", None) is not None: with tf.name_scope(self.qkv.name): self.qkv.build([None, None, self.config.hidden_size]) if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build([None, None, self.config.hidden_size]) def get_rel_pos(self, q_size: int, k_size: int, rel_pos: tf.Tensor) -> tf.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of the query. k_size (int): size of key k. rel_pos (`tf.Tensor`): relative position embeddings (L, channel). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel pos. rel_pos_resized = tf.image.resize( tf.reshape(rel_pos, (1, rel_pos.shape[0], -1)), size=(max_rel_dist, rel_pos.shape[1]), method="bilinear", ) rel_pos_resized = tf.reshape(rel_pos_resized, (-1, max_rel_dist)) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1) * max(k_size / q_size, 1.0) k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0) * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return tf.gather(rel_pos_resized, tf.cast(relative_coords, tf.int32)) def add_decomposed_rel_pos( self, attn: tf.Tensor, query: tf.Tensor, rel_pos_h: tf.Tensor, rel_pos_w: tf.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> tf.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: attn (`tf.Tensor`): attention map. query (`tf.Tensor`): query q in the attention layer with shape (batch_size, query_height * query_width, channel). rel_pos_h (`tf.Tensor`): relative position embeddings (Lh, channel) for height axis. rel_pos_w (`tf.Tensor`): relative position embeddings (Lw, channel) for width axis. 
q_size (tuple): spatial sequence size of query q with (query_height, query_width). k_size (tuple): spatial sequence size of key k with (key_height, key_width). Returns: attn (`tf.Tensor`): attention map with added relative positional embeddings. """ query_height, query_width = q_size key_height, key_width = k_size relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) batch_size, _, dim = shape_list(query) reshaped_query = tf.reshape(query, (batch_size, query_height, query_width, dim)) rel_h = tf.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) rel_w = tf.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) attn = tf.reshape(attn, (batch_size, query_height, query_width, key_height, key_width)) attn = attn + tf.expand_dims(rel_h, axis=-1) + tf.expand_dims(rel_w, axis=-2) attn = tf.reshape(attn, (batch_size, query_height * query_width, key_height * key_width)) return attn def call(self, hidden_states: tf.Tensor, output_attentions=False, training=False) -> tf.Tensor: batch_size, height, width, _ = shape_list(hidden_states) # qkv with shape (3, batch_size, nHead, height * width, channel) qkv = tf.reshape(self.qkv(hidden_states), (batch_size, height * width, 3, self.num_attention_heads, -1)) qkv = tf.transpose(qkv, perm=(2, 0, 3, 1, 4)) # q, k, v with shape (batch_size * nHead, height * width, channel) query, key, value = tf.unstack( tf.reshape(qkv, (3, batch_size * self.num_attention_heads, height * width, -1)), axis=0 ) attn_weights = tf.matmul(query * self.scale, key, transpose_b=True) if self.use_rel_pos: attn_weights = self.add_decomposed_rel_pos( attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) attn_weights = tf.nn.softmax(attn_weights, axis=-1) if training: attn_probs = tf.nn.dropout(attn_weights, rate=self.dropout) else: attn_probs = attn_weights attn_output = tf.reshape(attn_probs @ value, (batch_size, self.num_attention_heads, height, width, -1)) attn_output = tf.transpose(attn_output, perm=(0, 2, 3, 1, 4)) attn_output = tf.reshape(attn_output, (batch_size, height, width, self.config.hidden_size)) attn_output = self.proj(attn_output) if output_attentions: outputs = (attn_output, attn_weights) else: outputs = (attn_output, None) return outputs
class_definition
40,588
47,825
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,176
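Numeric sketch of `get_rel_pos` for equal query and key sizes: the relative offsets q - k span [-(k_size - 1), k_size - 1] and are shifted to index a table of 2 * k_size - 1 rows (here 7, matching the `2 * input_size - 1` weight shapes built above):

import tensorflow as tf

q_size = k_size = 4
q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1)
k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0)
relative_coords = (q_coords - k_coords) + (k_size - 1)  # values 0..6, shape (4, 4)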
class TFSamVisionLayer(keras.layers.Layer): def __init__(self, config, window_size, **kwargs): super().__init__(**kwargs) self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") self.attn = TFSamVisionAttention(config, window_size, name="attn") self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") self.mlp = TFSamMLPBlock(config, name="mlp") self.window_size = window_size self.config = config def window_partition(self, hidden_states: tf.Tensor, window_size: int) -> Tuple[tf.Tensor, Tuple[int, int]]: batch_size, height, width, channel = shape_list(hidden_states) pad_h = (window_size - height % window_size) % window_size pad_w = (window_size - width % window_size) % window_size if pad_h > 0 or pad_w > 0: hidden_states = tf.pad(hidden_states, [[0, 0], [0, pad_h], [0, pad_w], [0, 0]]) pad_height, pad_width = height + pad_h, width + pad_w hidden_states = tf.reshape( hidden_states, [batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel], ) windows = tf.reshape( tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [-1, window_size, window_size, channel] ) return windows, (pad_height, pad_width) def window_unpartition( self, windows: tf.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int] ) -> tf.Tensor: pad_height, pad_width = padding_shape height, width = original_shape batch_size = shape_list(windows)[0] // (pad_height * pad_width // window_size // window_size) hidden_states = tf.reshape( windows, [batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1] ) hidden_states = tf.reshape( tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [batch_size, pad_height, pad_width, -1] ) if pad_height > height or pad_width > width: hidden_states = hidden_states[:, :height, :width, :] return hidden_states def call( self, hidden_states: tf.Tensor, output_attentions: Optional[bool] = False, training: Optional[bool] = False, ) -> Tuple[tf.Tensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) if self.window_size > 0: height, width = hidden_states.shape[1], hidden_states.shape[2] hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size) hidden_states, attn_weights = self.attn( hidden_states=hidden_states, output_attentions=output_attentions, training=training, ) if self.window_size > 0: hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width)) hidden_states = residual + hidden_states layernorm_output = self.layer_norm2(hidden_states) hidden_states = hidden_states + self.mlp(layernorm_output) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, None, self.config.hidden_size]) if getattr(self, "attn", None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, None, self.config.hidden_size]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None)
class_definition
47,828
51,974
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,177
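Padding arithmetic from `window_partition` above, with values assumed from the SAM defaults (a 64x64 feature map and window_size=14):

height = width = 64
window_size = 14
pad_h = (window_size - height % window_size) % window_size  # 6, so the map is padded to 70x70
num_windows = ((height + pad_h) // window_size) ** 2        # 5 * 5 = 25 windows per image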
class TFSamVisionNeck(keras.layers.Layer): def __init__(self, config: SamVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.conv1 = keras.layers.Conv2D( config.output_channels, kernel_size=1, use_bias=False, name="conv1", ) self.layer_norm1 = TFSamLayerNorm(config.output_channels, name="layer_norm1") self.conv2 = keras.layers.Conv2D( config.output_channels, kernel_size=3, padding="same", use_bias=False, name="conv2", ) self.layer_norm2 = TFSamLayerNorm(config.output_channels, name="layer_norm2") def call(self, hidden_states): hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = tf.transpose(hidden_states, perm=[0, 3, 1, 2]) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build([None, None, None, self.config.hidden_size]) if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build(None) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build([None, None, None, self.config.output_channels]) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build(None)
class_definition
51,977
53,820
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,178
class TFSamVisionEncoder(keras.layers.Layer): def __init__(self, config: SamVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.image_size = config.image_size self.patch_embed = TFSamPatchEmbeddings(config, name="patch_embed") self.pos_embed = None self.layers = [] for i in range(config.num_hidden_layers): layer = TFSamVisionLayer( config, window_size=config.window_size if i not in config.global_attn_indexes else 0, name=f"layers_._{i}", ) self.layers.append(layer) self.neck = TFSamVisionNeck(config, name="neck") def build(self, input_shape=None): if self.built: return self.built = True if self.config.use_abs_pos: # Initialize absolute positional embedding with pretrain image size. self.pos_embed = self.add_weight( shape=[ 1, self.config.image_size // self.config.patch_size, self.config.image_size // self.config.patch_size, self.config.hidden_size, ], initializer="zeros", trainable=True, name="pos_embed", ) if getattr(self, "patch_embed", None) is not None: with tf.name_scope(self.patch_embed.name): self.patch_embed.build(None) if getattr(self, "neck", None) is not None: with tf.name_scope(self.neck.name): self.neck.build(None) for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) def get_input_embeddings(self): return self.patch_embed def call( self, pixel_values: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFSamVisionEncoderOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.patch_embed(pixel_values) if self.pos_embed is not None: hidden_states = hidden_states + self.pos_embed all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.neck(hidden_states) if not return_dict: outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (all_hidden_states,) if output_attentions: outputs = outputs + (all_self_attentions,) return outputs return TFSamVisionEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, )
class_definition
53,823
57,757
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,179
class TFSamPreTrainedModel(TFPreTrainedModel): config_class = SamConfig base_model_prefix = "sam" main_input_name = "pixel_values"
class_definition
57,760
57,902
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,180
class TFSamModel(TFSamPreTrainedModel): _keys_to_ignore_on_load_missing = [r"prompt_encoder.shared_embedding.positional_embedding"] def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.shared_image_embedding = TFSamPositionalEmbedding(config.vision_config, name="shared_image_embedding") self.vision_encoder = TFSamVisionEncoder(config.vision_config, name="vision_encoder") self.prompt_encoder = TFSamPromptEncoder( config.prompt_encoder_config, self.shared_image_embedding, name="prompt_encoder" ) self.mask_decoder = TFSamMaskDecoder(config.mask_decoder_config, name="mask_decoder") self.config = config def get_input_embeddings(self): return self.vision_encoder.get_input_embeddings() def get_image_wide_positional_embeddings(self): size = self.config.prompt_encoder_config.image_embedding_size grid = tf.ones((size, size)) y_embed = tf.math.cumsum(grid, axis=0) - 0.5 x_embed = tf.math.cumsum(grid, axis=1) - 0.5 y_embed = y_embed / size x_embed = x_embed / size positional_embedding = self.shared_image_embedding(tf.stack([x_embed, y_embed], axis=-1)) return tf.expand_dims(tf.transpose(positional_embedding, perm=[2, 0, 1]), axis=0) # channel x height x width def get_image_embeddings( self, pixel_values, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns the image embeddings by passing the pixel values through the vision encoder. Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Input pixel values output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.TFModelOutput`] instead of a plain tuple. """ vision_output = self.vision_encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeddings = vision_output[0] return image_embeddings def get_prompt_embeddings( self, input_points: tf.Tensor | None = None, input_labels: tf.Tensor | None = None, input_boxes: tf.Tensor | None = None, input_masks: tf.Tensor | None = None, ): r""" Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. Args: input_points (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`): Optional input points for the prompt encoder. The padding of the point is automatically done by the processor. `point_batch_size` refers to the number of masks that we want the model to predict per point. The model will output `point_batch_size` times 3 masks in total. input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image)`): Optional input labels for the prompt encoder. The padding of the labels is automatically done by the processor, or can be fed by the user. input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes_per_image, 4)`): Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the processor. users can also pass manually the input boxes. input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`): Optional input masks for the prompt encoder. 
""" prompt_output = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) return prompt_output @unpack_inputs @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING) def call( self, pixel_values: TFModelInputType | None = None, input_points: tf.Tensor | None = None, input_labels: tf.Tensor | None = None, input_boxes: tf.Tensor | None = None, input_masks: tf.Tensor | None = None, image_embeddings: tf.Tensor | None = None, multimask_output: bool = True, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, ) -> TFSamImageSegmentationOutput | Tuple[tf.Tensor]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None and image_embeddings is None: raise ValueError("Either pixel_values or image_embeddings must be provided.") if pixel_values is not None and image_embeddings is not None: raise ValueError("Only one of pixel_values and image_embeddings can be provided.") if input_points is not None and len(input_points.shape) != 4: raise ValueError( "The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.", " got {}.".format(input_points.shape), ) if input_boxes is not None and len(input_boxes.shape) != 3: raise ValueError( "The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.", " got {}.".format(input_boxes.shape), ) if input_points is not None and input_boxes is not None: point_batch_size = shape_list(input_points)[1] box_batch_size = shape_list(input_boxes)[1] if point_batch_size != box_batch_size: raise ValueError( "You should provide as many bounding boxes as input points per box. Got {} and {}.".format( point_batch_size, box_batch_size ) ) if pixel_values is not None: # Ensures that later checks pass even with an all-None shape from the serving signature pixel_values = tf.ensure_shape( pixel_values, [ None, self.config.vision_config.num_channels, self.config.vision_config.image_size, self.config.vision_config.image_size, ], ) image_positional_embeddings = self.get_image_wide_positional_embeddings() # repeat with batch size batch_size = shape_list(pixel_values)[0] if pixel_values is not None else shape_list(image_embeddings)[0] image_positional_embeddings = tf.repeat(image_positional_embeddings, batch_size, axis=0) vision_attentions = None vision_hidden_states = None if pixel_values is not None: vision_outputs = self.vision_encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, training=training, ) image_embeddings = vision_outputs["last_hidden_state"] if output_hidden_states: vision_hidden_states = vision_outputs["hidden_states"] if output_attentions: vision_attentions = vision_outputs["attentions"] if input_points is not None and input_labels is None: input_labels = tf.ones_like(input_points[:, :, :, 0], dtype=tf.int32) if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]: raise ValueError( "The batch size of the image embeddings and the input points must be the same. 
", "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]), " if you want to pass multiple points for the same image, make sure that you passed ", " input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ", " input_labels of shape (batch_size, point_batch_size, num_points_per_image)", ) sparse_embeddings, dense_embeddings = self.prompt_encoder( batch_size=shape_list(image_embeddings)[0], input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder( image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, output_attentions=output_attentions, ) if not return_dict: output = (iou_predictions, low_res_masks) if output_hidden_states: output = output + (vision_hidden_states,) if output_attentions: output = output + (vision_attentions, mask_decoder_attentions) return output return TFSamImageSegmentationOutput( iou_scores=iou_predictions, pred_masks=low_res_masks, vision_hidden_states=vision_hidden_states, vision_attentions=vision_attentions, mask_decoder_attentions=mask_decoder_attentions, ) def serving_output(self, output: TFSamImageSegmentationOutput) -> TFSamImageSegmentationOutput: hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None return TFSamImageSegmentationOutput( iou_scores=output.iou_scores, pred_masks=output.pred_masks, vision_hidden_states=hs if self.config.output_hidden_states else None, vision_attentions=attns if self.config.output_attentions else None, mask_decoder_attentions=output.mask_decoder_attentions if self.config.output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "shared_image_embedding", None) is not None: with tf.name_scope(self.shared_image_embedding.name): self.shared_image_embedding.build(None) if getattr(self, "vision_encoder", None) is not None: with tf.name_scope(self.vision_encoder.name): self.vision_encoder.build(None) if getattr(self, "prompt_encoder", None) is not None: with tf.name_scope(self.prompt_encoder.name): self.prompt_encoder.build(None) if getattr(self, "mask_decoder", None) is not None: with tf.name_scope(self.mask_decoder.name): self.mask_decoder.build(None)
class_definition
63,666
75,449
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/modeling_tf_sam.py
null
4,181
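A hedged end-to-end sketch of driving TFSamModel through the processor; the checkpoint name and image URL follow the usual documentation examples and are assumptions here, not part of this file:

from PIL import Image
import requests
from transformers import SamProcessor, TFSamModel

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model = TFSamModel.from_pretrained("facebook/sam-vit-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, input_points=[[[450, 600]]], return_tensors="tf")

outputs = model(**inputs)
masks = processor.image_processor.post_process_masks(
    outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"], return_tensors="tf"
)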
class SamPromptEncoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SamPromptEncoder`]. The [`SamPromptEncoder`]
    module is used to encode the input 2D points and bounding boxes. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the SAM-vit-h
    [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        image_size (`int`, *optional*, defaults to 1024):
            The expected output resolution of the image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        mask_input_channels (`int`, *optional*, defaults to 16):
            The number of channels to be fed to the `MaskDecoder` module.
        num_point_embeddings (`int`, *optional*, defaults to 4):
            The number of point embeddings to be used.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
    """

    base_config_key = "prompt_encoder_config"

    def __init__(
        self,
        hidden_size=256,
        image_size=1024,
        patch_size=16,
        mask_input_channels=16,
        num_point_embeddings=4,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.image_embedding_size = image_size // patch_size
        self.mask_input_channels = mask_input_channels
        self.num_point_embeddings = num_point_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
class_definition
780
2,835
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/configuration_sam.py
null
4,182
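The only derived field in this config is `image_embedding_size`, computed as `image_size // patch_size`. A short check:

```python
from transformers import SamPromptEncoderConfig

config = SamPromptEncoderConfig(image_size=1024, patch_size=16)
# 1024 // 16 == 64: side length of the embedding grid the prompts are aligned to
assert config.image_embedding_size == 64
```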
class SamMaskDecoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SamMaskDecoder`]. It is used to instantiate a
    SAM mask decoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SAM-vit-h
    [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function used inside the `SamMaskDecoder` module.
        mlp_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 2):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        attention_downsample_rate (`int`, *optional*, defaults to 2):
            The downsampling rate of the attention layer.
        num_multimask_outputs (`int`, *optional*, defaults to 3):
            The number of outputs from the `SamMaskDecoder` module. In the Segment Anything paper, this is set to 3.
        iou_head_depth (`int`, *optional*, defaults to 3):
            The number of layers in the IoU head module.
        iou_head_hidden_dim (`int`, *optional*, defaults to 256):
            The dimensionality of the hidden states in the IoU head module.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
    """

    base_config_key = "mask_decoder_config"

    def __init__(
        self,
        hidden_size=256,
        hidden_act="relu",
        mlp_dim=2048,
        num_hidden_layers=2,
        num_attention_heads=8,
        attention_downsample_rate=2,
        num_multimask_outputs=3,
        iou_head_depth=3,
        iou_head_hidden_dim=256,
        layer_norm_eps=1e-6,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.mlp_dim = mlp_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_downsample_rate = attention_downsample_rate
        self.num_multimask_outputs = num_multimask_outputs
        self.iou_head_depth = iou_head_depth
        self.iou_head_hidden_dim = iou_head_hidden_dim
        self.layer_norm_eps = layer_norm_eps
class_definition
2,838
5,792
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/configuration_sam.py
null
4,183
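A quick sketch of overriding the decoder hyperparameters (values are illustrative; `num_multimask_outputs` controls how many candidate masks are returned when `multimask_output=True`):

```python
from transformers import SamMaskDecoderConfig

# Non-default decoder: deeper IoU head, same 256-dim hidden size as the prompt encoder.
decoder_config = SamMaskDecoderConfig(num_multimask_outputs=3, iou_head_depth=4)
print(decoder_config.num_multimask_outputs)  # 3 candidate masks in multimask mode
```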
class SamVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SamVisionModel`]. It is used to instantiate a
    SAM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SAM ViT-h
    [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        output_channels (`int`, *optional*, defaults to 256):
            Dimensionality of the output channels in the Patch Encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        image_size (`int`, *optional*, defaults to 1024):
            Expected resolution. Target size of the resized input image.
        patch_size (`int`, *optional*, defaults to 16):
            Size of the patches to be extracted from the input image.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to query, key, value projections.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of mlp hidden dim to embedding dim.
        use_abs_pos (`bool`, *optional*, defaults to `True`):
            Whether to use absolute position embedding.
        use_rel_pos (`bool`, *optional*, defaults to `True`):
            Whether to use relative position embedding.
        window_size (`int`, *optional*, defaults to 14):
            Window size for relative position.
        global_attn_indexes (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`):
            The indexes of the global attention layers.
        num_pos_feats (`int`, *optional*, defaults to 128):
            The dimensionality of the position embedding.
        mlp_dim (`int`, *optional*):
            The dimensionality of the MLP layer in the Transformer encoder. If `None`, defaults to `mlp_ratio *
            hidden_size`.
""" base_config_key = "vision_config" def __init__( self, hidden_size=768, output_channels=256, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=1024, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=128, mlp_dim=None, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.output_channels = output_channels self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = int(hidden_size * mlp_ratio) if mlp_dim is None else mlp_dim
class_definition
5,795
10,448
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/configuration_sam.py
null
4,184
class SamConfig(PretrainedConfig):
    r"""
    [`SamConfig`] is the configuration class to store the configuration of a [`SamModel`]. It is used to instantiate a
    SAM model according to the specified arguments, defining the vision model, prompt-encoder model and mask decoder
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    SAM-ViT-H [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (Union[`dict`, `SamVisionConfig`], *optional*):
            Dictionary of configuration options used to initialize [`SamVisionConfig`].
        prompt_encoder_config (Union[`dict`, `SamPromptEncoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`SamPromptEncoderConfig`].
        mask_decoder_config (Union[`dict`, `SamMaskDecoderConfig`], *optional*):
            Dictionary of configuration options used to initialize [`SamMaskDecoderConfig`].
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import (
    ...     SamVisionConfig,
    ...     SamPromptEncoderConfig,
    ...     SamMaskDecoderConfig,
    ...     SamModel,
    ... )

    >>> # Initializing a SamConfig with `"facebook/sam-vit-huge"` style configuration
    >>> configuration = SamConfig()

    >>> # Initializing a SamModel (with random weights) from the `"facebook/sam-vit-huge"` style configuration
    >>> model = SamModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a SamConfig from a SamVisionConfig, SamPromptEncoderConfig, and SamMaskDecoderConfig

    >>> # Initializing SAM vision encoder, prompt encoder, and mask decoder configurations
    >>> vision_config = SamVisionConfig()
    >>> prompt_encoder_config = SamPromptEncoderConfig()
    >>> mask_decoder_config = SamMaskDecoderConfig()

    >>> config = SamConfig(vision_config, prompt_encoder_config, mask_decoder_config)
    ```"""

    model_type = "sam"
    sub_configs = {
        "prompt_encoder_config": SamPromptEncoderConfig,
        "mask_decoder_config": SamMaskDecoderConfig,
        "vision_config": SamVisionConfig,
    }

    def __init__(
        self,
        vision_config=None,
        prompt_encoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        vision_config = vision_config if vision_config is not None else {}
        prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
        mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}

        if isinstance(vision_config, SamVisionConfig):
            vision_config = vision_config.to_dict()
        if isinstance(prompt_encoder_config, SamPromptEncoderConfig):
            prompt_encoder_config = prompt_encoder_config.to_dict()
        if isinstance(mask_decoder_config, SamMaskDecoderConfig):
            mask_decoder_config = mask_decoder_config.to_dict()

        self.vision_config = SamVisionConfig(**vision_config)
        self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config)
        self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config)
        self.initializer_range = initializer_range
class_definition
10,451
14,069
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sam/configuration_sam.py
null
4,185
class MptAttention(nn.Module):
    """Multi-head self attention.
    Using the torch or triton attention implementation enables the user to also use additive bias.
    """

    def __init__(self, config: MptConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.n_heads = config.n_heads
        self.max_seq_length = config.max_seq_len
        self.head_dim = self.hidden_size // self.n_heads
        self.softmax_scale = config.attn_config.softmax_scale
        if self.softmax_scale is None:
            self.softmax_scale = 1 / math.sqrt(self.hidden_size / self.n_heads)

        self.attn_dropout_p = config.attn_config.attn_pdrop
        self.clip_qkv = config.attn_config.clip_qkv
        self.Wqkv = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
        self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_bias: torch.Tensor,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        batch_size, seq_length = hidden_states.shape[:2]

        mixed_qkv = self.Wqkv(hidden_states)
        if self.clip_qkv:
            mixed_qkv = mixed_qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)

        query_states, key_states, value_states = mixed_qkv.chunk(3, dim=2)
        query_states = query_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)

        if past_key_value is not None:
            if len(past_key_value) != 0:
                key_states = torch.cat([past_key_value[0], key_states], dim=2)
                value_states = torch.cat([past_key_value[1], value_states], dim=2)
            past_key_value = (key_states, value_states)
        else:
            past_key_value = (key_states, value_states)

        attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) * self.softmax_scale

        query_length = seq_length if past_key_value is None else seq_length + past_key_value[0].shape[2]

        if position_bias is not None:
            if len(position_bias.shape) != 3:
                raise ValueError(f"Expecting position_bias shape to be 3 dimensions, got {len(position_bias.shape)}")
            key_length = key_states.shape[-2]

            position_bias_query_index = max(0, position_bias.size(1) - query_length)
            position_bias_key_index = max(0, position_bias.size(2) - key_length)

            position_bias = position_bias[:, position_bias_query_index:, position_bias_key_index:]

            attention_scores = attention_scores + position_bias

        if attention_mask is not None:
            attention_scores = attention_scores.masked_fill(attention_mask, torch.finfo(query_states.dtype).min)

        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).to(value_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attn_dropout_p, training=self.training)

        context_states = torch.matmul(attn_weights, value_states)
        context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
        attn_output = self.out_proj(context_states)

        return attn_output, attn_weights, past_key_value
class_definition
2,710
6,303
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,186
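The default `softmax_scale` works out to the usual `1/sqrt(head_dim)`, and the fused `Wqkv` projection is split with `chunk(3, dim=2)`. A standalone shape walk-through under those definitions (sizes are illustrative):

```python
import math

import torch

hidden_size, n_heads, batch, seq = 256, 8, 2, 10
head_dim = hidden_size // n_heads
softmax_scale = 1 / math.sqrt(hidden_size / n_heads)  # == 1 / sqrt(head_dim)

mixed_qkv = torch.randn(batch, seq, 3 * hidden_size)  # stands in for the fused Wqkv output
q, k, v = mixed_qkv.chunk(3, dim=2)
q = q.reshape(batch, seq, n_heads, head_dim).transpose(1, 2)  # (batch, n_heads, seq, head_dim)
k = k.reshape(batch, seq, n_heads, head_dim).transpose(1, 2)

scores = torch.matmul(q, k.transpose(-1, -2)) * softmax_scale
print(scores.shape)  # torch.Size([2, 8, 10, 10])
```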
class MptMLP(nn.Module): def __init__(self, config: MptConfig): super().__init__() hidden_size = config.hidden_size self.up_proj = nn.Linear(hidden_size, 4 * hidden_size, bias=False) self.act = nn.GELU(approximate="none") self.down_proj = nn.Linear(4 * hidden_size, hidden_size, bias=False) self.hidden_dropout = config.attn_config.attn_pdrop def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: hidden_states = self.act(self.up_proj(hidden_states)) intermediate_output = self.down_proj(hidden_states) output = F.dropout(intermediate_output, p=self.hidden_dropout, training=self.training) output = output + residual return output
class_definition
6,306
7,071
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,187
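MptMLP is a plain 4x-expansion GELU block that adds its residual inside the module. A small shape sketch using a toy config (the tiny sizes are illustrative, and the import path is the internal modeling module):

```python
import torch

from transformers import MptConfig
from transformers.models.mpt.modeling_mpt import MptMLP

config = MptConfig(d_model=64, n_heads=4, n_layers=2)
mlp = MptMLP(config)

hidden = torch.randn(1, 5, 64)
out = mlp(hidden, residual=hidden)  # up-projects to 4 * 64, GELU, down-projects, adds residual
print(out.shape)  # torch.Size([1, 5, 64])
```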
class MptBlock(nn.Module): def __init__(self, config: MptConfig): super().__init__() hidden_size = config.hidden_size self.norm_1 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) # backward compatibility with weights on the Hub self.norm_1.bias = None self.num_heads = config.n_heads self.attn = MptAttention(config) self.norm_2 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) # backward compatibility with weights on the Hub self.norm_2.bias = None self.ffn = MptMLP(config) self.dropout_rate = config.attn_config.attn_pdrop self.resid_attn_dropout = nn.Dropout(self.dropout_rate) def forward( self, hidden_states: torch.Tensor, position_bias: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: bool = False, output_attentions: bool = False, ): # hidden_states: [batch_size, seq_length, hidden_size] # Layer norm at the beginning of the transformer layer. layernorm_output = self.norm_1(hidden_states) residual = hidden_states # Self attention. attn_outputs, attn_weights, past_key_value = self.attn( layernorm_output, position_bias=position_bias, attention_mask=attention_mask, past_key_value=layer_past, ) hidden_states = self.resid_attn_dropout(attn_outputs) + residual layernorm_output = self.norm_2(hidden_states) # Get residual residual = hidden_states # MLP. output = self.ffn(layernorm_output, residual) outputs = (output,) if use_cache: outputs += (past_key_value,) if output_attentions: outputs += (attn_weights,) return outputs # hidden_states, present, attentions
class_definition
7,074
9,029
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,188
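The block follows the pre-LayerNorm pattern: normalize, attend, add residual, normalize, MLP (which adds its own residual). A minimal sketch of that ordering with stand-in modules (the class and layer names are illustrative, not the real attention):

```python
import torch
from torch import nn


class PreNormBlockSketch(nn.Module):
    # Illustrative stand-in mirroring MptBlock's residual ordering.
    def __init__(self, hidden_size: int):
        super().__init__()
        self.norm_1 = nn.LayerNorm(hidden_size)
        self.mix = nn.Linear(hidden_size, hidden_size)  # stands in for self-attention
        self.norm_2 = nn.LayerNorm(hidden_size)
        self.ffn = nn.Linear(hidden_size, hidden_size)  # stands in for MptMLP

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.mix(self.norm_1(hidden_states)) + hidden_states  # attn + residual
        return self.ffn(self.norm_2(hidden_states)) + hidden_states  # mlp + residual


print(PreNormBlockSketch(8)(torch.randn(1, 3, 8)).shape)  # torch.Size([1, 3, 8])
```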
class MptPreTrainedModel(PreTrainedModel): config_class = MptConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["MptBlock"] _keys_to_ignore_on_load_missing = [r"lm_head.*."] def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module: nn.Module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayerNorm): if module.bias is not None: module.bias.data.zero_() module.weight.data.fill_(1.0) @staticmethod def _convert_to_mpt_cache( past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: """ Converts the cache to the format expected by Mpt, i.e. to tuple(tuple([batch_size * num_heads, ...])) """ batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape batch_size_times_num_heads = batch_size * num_heads # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length] # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim] return tuple( ( layer_past[0].reshape(batch_size_times_num_heads, head_dim, seq_length), layer_past[1].reshape(batch_size_times_num_heads, seq_length, head_dim), ) for layer_past in past_key_value )
class_definition
9,032
11,177
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,189
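`_convert_to_mpt_cache` only folds the head dimension into the batch dimension; note that keys are stored transposed (`head_dim` before `seq_length`) relative to values. A dummy-tensor check of the same reshape:

```python
import torch

batch, num_heads, head_dim, seq = 2, 4, 8, 5
key = torch.randn(batch, num_heads, head_dim, seq)
value = torch.randn(batch, num_heads, seq, head_dim)

# The per-layer reshape applied by _convert_to_mpt_cache:
key_mpt = key.reshape(batch * num_heads, head_dim, seq)
value_mpt = value.reshape(batch * num_heads, seq, head_dim)
print(key_mpt.shape, value_mpt.shape)  # torch.Size([8, 8, 5]) torch.Size([8, 5, 8])
```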
class MptModel(MptPreTrainedModel): def __init__(self, config: MptConfig): super().__init__(config) self.hidden_size = config.hidden_size self.num_heads = config.n_heads # Embedding + LN Embedding self.wte = nn.Embedding(config.vocab_size, self.hidden_size) # Transformer blocks self.blocks = nn.ModuleList([MptBlock(config) for _ in range(config.n_layers)]) # Final Layer Norm self.norm_f = LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon) # backward compatibility with weights on the Hub self.norm_f.bias = None self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.wte def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None): return build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max, device) def set_input_embeddings(self, new_embeddings: torch.Tensor): self.wte = new_embeddings @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past_key_values is None: past_key_values = tuple([None] * len(self.blocks)) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) hidden_states = inputs_embeds presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # Compute alibi tensor: check build_alibi_tensor documentation seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) alibi = self.build_mpt_alibi_tensor(self.num_heads, self.config.max_seq_len, device=hidden_states.device) causal_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) causal_mask = causal_mask.bool() for block, layer_past in zip(self.blocks, past_key_values): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, alibi, causal_mask, layer_past, use_cache, output_attentions, ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=causal_mask, use_cache=use_cache, output_attentions=output_attentions, position_bias=alibi, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) # Add last hidden state hidden_states = self.norm_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, )
class_definition
15,195
21,277
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,190
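A minimal forward-pass sketch of the bare `MptModel` with a small randomly initialized config (sizes are illustrative):

```python
import torch

from transformers import MptConfig, MptModel

config = MptConfig(d_model=64, n_heads=4, n_layers=2, vocab_size=128, max_seq_len=32)
model = MptModel(config)

input_ids = torch.randint(0, config.vocab_size, (1, 10))
outputs = model(input_ids, use_cache=True)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 10, 64])
print(len(outputs.past_key_values))  # 2: one (key, value) pair per block
```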
class MptForCausalLM(MptPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: MptConfig): super().__init__(config) self.transformer = MptModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() batch_size, seq_length, vocab_size = shift_logits.shape # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct( shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length) ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def _reorder_cache( self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. Output shares the same memory storage as `past`. """ # Get a copy of `beam_idx` on all the devices where we need those indices. 
device_to_beam_idx = { past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past } reordered_past = tuple( ( layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]), layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]), ) for layer_past in past ) return reordered_past
class_definition
21,476
26,061
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,191
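Since the loss path shifts labels internally, `labels=input_ids` is the usual training pattern. A sketch with the same toy config (random weights, so the loss value is meaningless):

```python
import torch

from transformers import MptConfig, MptForCausalLM

config = MptConfig(d_model=64, n_heads=4, n_layers=2, vocab_size=128, max_seq_len=32)
model = MptForCausalLM(config)

input_ids = torch.randint(0, config.vocab_size, (2, 12))
outputs = model(input_ids, labels=input_ids)  # labels are shifted inside the model
print(outputs.loss, outputs.logits.shape)  # scalar loss, torch.Size([2, 12, 128])
```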
class MptForSequenceClassification(MptPreTrainedModel): def __init__(self, config: MptConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = MptModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
class_definition
26,848
31,832
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,192
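The classifier pools the logit of the last non-padding token; the `argmax` trick finds the first pad position and steps back one, with a modulo that maps the no-padding case (`-1`) to the last index, keeping the graph ONNX-exportable. A dummy check of just that indexing logic:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0], [5, 6, 7, 8, 9]])  # second row has no padding

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]  # maps -1 (no pad found) to the last index
print(sequence_lengths)  # tensor([2, 4]): index of the last real token per row
```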
class MptForTokenClassification(MptPreTrainedModel):
    def __init__(self, config: MptConfig):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = MptModel(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ...,
            config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            batch_size, seq_length = labels.shape
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
            )

        if not return_dict:
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
class_definition
32,059
35,491
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,193
class MptForQuestionAnswering(MptPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.transformer = MptModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification
            loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class_definition
35,792
39,486
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/modeling_mpt.py
null
4,194
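The QA head emits two logits per token; splitting and squeezing yields separate start/end distributions, and out-of-range gold positions are clamped to `ignored_index` so they drop out of the loss. A dummy-tensor sketch of that path:

```python
import torch

seq_len = 6
logits = torch.randn(1, seq_len, 2)  # stands in for the qa_outputs linear layer output
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)  # (1, seq_len)
end_logits = end_logits.squeeze(-1)

ignored_index = start_logits.size(1)  # == seq_len
start_positions = torch.tensor([99]).clamp(0, ignored_index)  # out-of-range label -> 6, ignored by the loss
print(start_logits.shape, start_positions)  # torch.Size([1, 6]) tensor([6])
```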
class MptAttentionConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate
    attention layers according to the specified arguments, defining the layers architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MPT
    [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) architecture. Most of the arguments are kept for
    backward compatibility with previous MPT models that are hosted on the Hub (previously with
    `trust_remote_code=True`).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        attn_type (`str`, *optional*, defaults to `"multihead_attention"`):
            Type of attention to use. Options: `"multihead_attention"`, `"multiquery_attention"`.
        attn_pdrop (`float`, *optional*, defaults to `0.0`):
            The dropout probability for the attention layers.
        attn_impl (`str`, *optional*, defaults to `"torch"`):
            The attention implementation to use. One of `"torch"`, `"flash"`, or `"triton"`.
        clip_qkv (`float`, *optional*):
            If not `None`, clip the queries, keys, and values in the attention layer to this value.
        softmax_scale (`float`, *optional*):
            If not `None`, scale the softmax in the attention layer by this value. If `None`, will default to
            `1/sqrt(hidden_size / n_heads)`, i.e. the usual `1/sqrt(head_dim)` scaling.
        prefix_lm (`bool`, *optional*, defaults to `False`):
            Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument
            which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another
            bi-directionally. Tokens outside the prefix use causal attention.
        qk_ln (`bool`, *optional*, defaults to `False`):
            Whether to apply layer normalization to the queries and keys in the attention layer.
        attn_uses_sequence_id (`bool`, *optional*, defaults to `False`):
            Whether to restrict attention to tokens that have the same token_type_ids. When the model is in `train`
            mode, this requires passing an extra *token_type_ids* argument which indicates which sub-sequence each
            token belongs to. Defaults to `False` meaning any provided *token_type_ids* will be ignored.
        alibi (`bool`, *optional*, defaults to `True`):
            Whether or not to use the alibi bias instead of positional embedding.
        alibi_bias_max (`int`, *optional*, defaults to 8):
            The maximum value of the alibi bias.
    """

    base_config_key = "attn_config"

    def __init__(
        self,
        attn_type="multihead_attention",
        attn_pdrop=0,
        attn_impl="torch",
        clip_qkv=None,
        softmax_scale=None,
        prefix_lm=False,
        qk_ln=False,
        attn_uses_sequence_id=False,
        alibi=True,
        alibi_bias_max=8,
        **kwargs,
    ):
        super().__init__()
        self.attn_type = attn_type
        self.attn_pdrop = attn_pdrop
        self.attn_impl = attn_impl
        self.clip_qkv = clip_qkv
        self.softmax_scale = softmax_scale
        self.prefix_lm = prefix_lm
        self.attn_uses_sequence_id = attn_uses_sequence_id
        self.alibi = alibi
        self.qk_ln = qk_ln
        self.alibi_bias_max = alibi_bias_max

        if attn_type not in ["multihead_attention", "multiquery_attention"]:
            raise ValueError(
                f"`attn_type` has to be either `multihead_attention` or `multiquery_attention`. Received: {attn_type}"
            )
class_definition
851
4,618
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/configuration_mpt.py
null
4,195
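The only validation performed is on `attn_type`; every other field is stored as-is. A short sketch (importing from the internal configuration module, since this sub-config may not be exported at the top level):

```python
from transformers.models.mpt.configuration_mpt import MptAttentionConfig

attn_config = MptAttentionConfig(attn_pdrop=0.1, clip_qkv=8.0, alibi=True)
print(attn_config.softmax_scale)  # None -> MptAttention falls back to 1/sqrt(head_dim)

try:
    MptAttentionConfig(attn_type="sliding_window_attention")
except ValueError as err:
    print(err)  # `attn_type` has to be either `multihead_attention` or `multiquery_attention`...
```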
class MptConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MptModel`]. It is used to instantiate a Mpt model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to the Mpt-7b architecture [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: d_model (`int`, *optional*, defaults to 2048): Dimensionality of the embeddings and hidden states. n_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. n_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. expansion_ratio (`int`, *optional*, defaults to 4): The ratio of the up/down scale in the MLP. max_seq_len (`int`, *optional*, defaults to 2048): The maximum sequence length of the model. vocab_size (`int`, *optional*, defaults to 50368): Vocabulary size of the Mpt model. Defines the maximum number of different tokens that can be represented by the `inputs_ids` passed when calling [`MptModel`]. Check [this discussion](https://huggingface.co/bigscience/mpt/discussions/120#633d28389addb8530b406c2a) on how the `vocab_size` has been defined. resid_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability applied to the attention output before combining with residual. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers. emb_pdrop (`float`, *optional*, defaults to 0.0): The dropout probability for the embedding layer. learned_pos_emb (`bool`, *optional*, defaults to `True`): Whether to use learned positional embeddings. attn_config (`dict`, *optional*): A dictionary used to configure the model's attention module. init_device (`str`, *optional*, defaults to `"cpu"`): The device to use for parameter initialization. Defined for backward compatibility logit_scale (`float`, *optional*): If not None, scale the logits by this value. no_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in all linear layers. verbose (`int`, *optional*, defaults to 0): The verbosity level to use for logging. Used in the previous versions of MPT models for logging. This argument is deprecated. embedding_fraction (`float`, *optional*, defaults to 1.0): The fraction to scale the gradients of the embedding layer by. norm_type (`str`, *optional*, defaults to `"low_precision_layernorm"`): Type of layer norm to use. All MPT models uses the same layer norm implementation. Defined for backward compatibility. use_cache (`bool`, *optional*, defaults to `False`): Whether or not the model should return the last key/values attentions (not used by all models). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import MptConfig, MptModel >>> # Initializing a Mpt configuration >>> configuration = MptConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = MptModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "mpt" sub_configs = {"attn_config": MptAttentionConfig} attribute_map = { "num_attention_heads": "n_heads", "hidden_size": "d_model", "num_hidden_layers": "n_layers", } def __init__( self, d_model: int = 2048, n_heads: int = 16, n_layers: int = 24, expansion_ratio: int = 4, max_seq_len: int = 2048, vocab_size: int = 50368, resid_pdrop: float = 0.0, layer_norm_epsilon: float = 1e-5, emb_pdrop: float = 0.0, learned_pos_emb: bool = True, attn_config: MptAttentionConfig = None, init_device: str = "cpu", logit_scale: Optional[Union[float, str]] = None, no_bias: bool = True, verbose: int = 0, embedding_fraction: float = 1.0, norm_type: str = "low_precision_layernorm", use_cache: bool = False, initializer_range=0.02, **kwargs, ): if attn_config is None: self.attn_config = MptAttentionConfig() elif isinstance(attn_config, dict): self.attn_config = MptAttentionConfig(**attn_config) else: self.attn_config = attn_config self.d_model = d_model self.n_heads = n_heads self.n_layers = n_layers self.expansion_ratio = expansion_ratio self.max_seq_len = max_seq_len self.vocab_size = vocab_size self.resid_pdrop = resid_pdrop self.emb_pdrop = emb_pdrop self.learned_pos_emb = learned_pos_emb self.init_device = init_device self.logit_scale = logit_scale self.no_bias = no_bias self.verbose = verbose self.embedding_fraction = embedding_fraction self.norm_type = norm_type self.layer_norm_epsilon = layer_norm_epsilon self.use_cache = use_cache self.initializer_range = initializer_range super().__init__(**kwargs)
class_definition
4,621
10,516
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mpt/configuration_mpt.py
null
4,196
class Wav2Vec2PhonemeCTCTokenizerOutput(ModelOutput):
    """
    Output type of [`Wav2Vec2PhonemeCTCTokenizer`], with transcription.

    Args:
        text (list of `str` or `str`):
            Decoded logits in text form. Usually the speech transcription.
        char_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):
            Offsets of the decoded characters. In combination with sampling rate and model downsampling rate char
            offsets can be used to compute time stamps for each character.
    """

    text: Union[List[str], str]
    char_offsets: Union[List[ListOfDict], ListOfDict] = None
class_definition
1,600
2,327
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py
null
4,197
class Wav2Vec2PhonemeCTCTokenizer(PreTrainedTokenizer):
    """
    Constructs a Wav2Vec2PhonemeCTC tokenizer.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer
    to the superclass for more information regarding such methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sentence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sentence token.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        do_phonemize (`bool`, *optional*, defaults to `True`):
            Whether the tokenizer should phonetize the input or not. Only if a sequence of phonemes is passed to the
            tokenizer, `do_phonemize` should be set to `False`.
        phonemizer_lang (`str`, *optional*, defaults to `"en-us"`):
            The language of the phoneme set to which the tokenizer should phonetize the input text.
        phonemizer_backend (`str`, *optional*, defaults to `"espeak"`):
            The backend phonetization library that shall be used by the phonemizer library. See the
            [phonemizer package](https://github.com/bootphon/phonemizer#readme) for more information.
        **kwargs
            Additional keyword arguments passed along to [`PreTrainedTokenizer`]
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        phone_delimiter_token=" ",
        word_delimiter_token=None,
        do_phonemize=True,
        phonemizer_lang="en-us",
        phonemizer_backend="espeak",
        **kwargs,
    ):
        self._word_delimiter_token = word_delimiter_token
        self._phone_delimiter_token = phone_delimiter_token
        self.do_phonemize = do_phonemize
        self.phonemizer_lang = phonemizer_lang
        self.phonemizer_backend = phonemizer_backend

        if do_phonemize:
            self.init_backend(self.phonemizer_lang)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            word_delimiter_token=word_delimiter_token,
            phone_delimiter_token=phone_delimiter_token,
            do_phonemize=do_phonemize,
            phonemizer_lang=phonemizer_lang,
            phonemizer_backend=phonemizer_backend,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        vocab = dict(self.encoder.copy())
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # Overwritten to never strip!
        to_add = []
        for token in new_tokens:
            if isinstance(token, str):
                to_add.append(AddedToken(token, rstrip=False, lstrip=False, normalized=True, special=special_tokens))
            else:
                to_add.append(token)

        return super()._add_tokens(to_add, special_tokens)

    def init_backend(self, phonemizer_lang: str):
        """
        Initializes the backend.

        Args:
            phonemizer_lang (`str`): The language to be used.
""" requires_backends(self, "phonemizer") from phonemizer.backend import BACKENDS self.backend = BACKENDS[self.phonemizer_backend](phonemizer_lang, language_switch="remove-flags") def prepare_for_tokenization( self, text: str, is_split_into_words: bool = False, phonemizer_lang: Optional[str] = None, do_phonemize: Optional[bool] = None, ) -> Tuple[str, Dict[str, Any]]: """ Performs any necessary transformations before tokenization. This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the `kwargs` at the end of the encoding process to be sure all the arguments have been used. Args: text (`str`): The text to prepare. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. phonemizer_lang (`str`, *optional*): The language of the phoneme set to which the tokenizer should phonetize the input text to. do_phonemize (`bool`, *optional*): Whether the tokenizer should phonetize the input text or not. Only if a sequence of phonemes is passed to the tokenizer, `do_phonemize` should be set to `False`. Returns: `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs. """ if is_split_into_words: text = " " + text # set whether tokenizer should phonemize or not if do_phonemize is not None: self.do_phonemize = do_phonemize # set the correct phonemizer language if phonemizer_lang is not None: self.phonemizer_lang = phonemizer_lang self.init_backend(phonemizer_lang) return (text, {}) def _tokenize(self, text, **kwargs): """ Converts a string into a sequence of tokens (string), using the tokenizer. """ # make sure whitespace is stripped to prevent <unk> text = text.strip() # phonemize if self.do_phonemize: text = text.lower() # create list of phonemes text = self.phonemize(text, self.phonemizer_lang) # make sure ' ' is between phonemes tokens = text.split(" ") tokens = list(filter(lambda p: p.strip() != "", tokens)) return tokens def phonemize(self, text: str, phonemizer_lang: Optional[str] = None) -> str: from phonemizer.separator import Separator word_delimiter = self.word_delimiter_token + " " if self.word_delimiter_token is not None else "" if phonemizer_lang is not None and phonemizer_lang != self.phonemizer_lang: self.init_backend(phonemizer_lang) else: phonemizer_lang = self.phonemizer_lang separator = Separator(phone=self.phone_delimiter_token, word=word_delimiter, syllable="") phonemes = self.backend.phonemize( [text], separator=separator, ) phonemes = phonemes[0].strip() return phonemes @property def word_delimiter_token(self) -> str: """ `str`: Word delimiter token. Log an error if used while not having been set. """ if self._word_delimiter_token is None: if self.verbose: logger.error("Using word_delimiter_token, but it is not set yet.") return None return str(self._word_delimiter_token) @property def word_delimiter_token_id(self) -> Optional[int]: """ `Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns `None` if the token has not been set. 
""" if self._word_delimiter_token is None: return None return self.convert_tokens_to_ids(self.word_delimiter_token) @word_delimiter_token.setter def word_delimiter_token(self, value): self._word_delimiter_token = value @word_delimiter_token_id.setter def word_delimiter_token_id(self, value): self._word_delimiter_token = self.convert_tokens_to_ids(value) @property def phone_delimiter_token(self) -> str: """ `str`: Word delimiter token. Log an error if used while not having been set. """ if self._phone_delimiter_token is None: if self.verbose: logger.error("Using phone_delimiter_token, but it is not set yet.") return None return str(self._phone_delimiter_token) @property def phone_delimiter_token_id(self) -> Optional[int]: """ `Optional[int]`: Id of the phone_delimiter_token in the vocabulary. Returns `None` if the token has not been set. """ if self._phone_delimiter_token is None: return None return self.convert_tokens_to_ids(self.phone_delimiter_token) @phone_delimiter_token.setter def phone_delimiter_token(self, value): self._phone_delimiter_token = value @phone_delimiter_token_id.setter def phone_delimiter_token_id(self, value): self._phone_delimiter_token = self.convert_tokens_to_ids(value) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) in an index (integer) using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string( self, tokens: List[str], group_tokens: bool = True, spaces_between_special_tokens: bool = False, filter_word_delimiter_token: bool = True, output_char_offsets: bool = False, ) -> str: """ Converts a connectionist-temporal-classification (CTC) output tokens into a single string. 
""" # group same tokens into non-repeating tokens in CTC style decoding if group_tokens: chars, char_repetitions = zip(*((token, len(list(group_iter))) for token, group_iter in groupby(tokens))) else: chars = tokens char_repetitions = len(tokens) * [1] # filter self.pad_token which is used as CTC-blank token processed_chars = list(filter(lambda char: char != self.pad_token, chars)) # also filter self.word_delimiter_token if not not if filter_word_delimiter_token and self.word_delimiter_token is not None: processed_chars = list(filter(lambda token: token != self.word_delimiter_token, processed_chars)) # retrieve offsets char_offsets = None if output_char_offsets: word_delimiter_token_for_offsets = ( self.word_delimiter_token if filter_word_delimiter_token is True else None ) char_offsets = self._compute_offsets( char_repetitions, chars, self.pad_token, word_delimiter_token=word_delimiter_token_for_offsets ) if len(char_offsets) != len(processed_chars): raise ValueError( f"`char_offsets`: {char_offsets} and `processed_tokens`: {processed_chars}" " have to be of the same length, but are: `len(offsets)`: " f"{len(char_offsets)} and `len(processed_tokens)`: {len(processed_chars)}" ) # set tokens to correct processed token for i, char in enumerate(processed_chars): char_offsets[i]["char"] = char string = " ".join(processed_chars).strip() return {"text": string, "char_offsets": char_offsets} @staticmethod def _compute_offsets( char_repetitions: List[int], chars: List[str], ctc_token: int, word_delimiter_token: Optional[int] = None ) -> List[Dict[str, Union[str, int]]]: end_indices = np.asarray(char_repetitions).cumsum() start_indices = np.concatenate(([0], end_indices[:-1])) offsets = [ {"char": t, "start_offset": s, "end_offset": e} for t, s, e in zip(chars, start_indices, end_indices) ] # filter out CTC token offsets = list(filter(lambda offsets: offsets["char"] != ctc_token, offsets)) # filter out word delimiter token if necessary if word_delimiter_token is not None: offsets = list(filter(lambda offsets: offsets["char"] != word_delimiter_token, offsets)) return offsets def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, group_tokens: bool = True, filter_word_delimiter_token: bool = True, spaces_between_special_tokens: bool = False, output_char_offsets: bool = False, ) -> str: """ special _decode function is needed for Wav2Vec2PhonemeTokenizer because added tokens should be treated exactly the same as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on the whole token list and not individually on added tokens """ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) result = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue result.append(token) string_output = self.convert_tokens_to_string( result, group_tokens=group_tokens, spaces_between_special_tokens=spaces_between_special_tokens, filter_word_delimiter_token=filter_word_delimiter_token, output_char_offsets=output_char_offsets, ) text = string_output["text"] clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: text = self.clean_up_tokenization(text) if output_char_offsets: return Wav2Vec2PhonemeCTCTokenizerOutput(text=text, char_offsets=string_output["char_offsets"]) else: return text # overwritten from 
    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        output_char_offsets: bool = False,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces.
            output_char_offsets (`bool`, *optional*, defaults to `False`):
                Whether or not to output character offsets. Character offsets can be used in combination with the
                sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.

                <Tip>

                Please take a look at the example of [`~models.wav2vec2.tokenization_wav2vec2.decode`] to better
                understand how to make use of `output_char_offsets`.
                [`~models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.batch_decode`] works the same way with
                phonemes.

                </Tip>

            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str` or [`~models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`]:
            The decoded sentence. Will be a
            [`~models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`] when
            `output_char_offsets == True`.
        """
        # Convert inputs to python lists
        token_ids = to_py_obj(token_ids)

        return self._decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            output_char_offsets=output_char_offsets,
            **kwargs,
        )

    # overwritten from `tokenization_utils_base.py` because the tokenizer can output
    # `ModelOutput`, which should not be a list for batched output, and because
    # we need docs for `output_char_offsets` here
    def batch_decode(
        self,
        sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        output_char_offsets: bool = False,
        **kwargs,
    ) -> List[str]:
        """
        Convert a list of lists of token ids into a list of strings by calling decode.

        Args:
            sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces.
            output_char_offsets (`bool`, *optional*, defaults to `False`):
                Whether or not to output character offsets. Character offsets can be used in combination with the
                sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.

                <Tip>

                Please take a look at the example of [`~models.wav2vec2.tokenization_wav2vec2.decode`] to better
                understand how to make use of `output_char_offsets`.
                [`~models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.batch_decode`] works analogously with
                phonemes and batched output.
                </Tip>

            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `List[str]` or [`~models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`]:
            The decoded sentences. Will be a
            [`~models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizerOutput`] when
            `output_char_offsets == True`.
        """
        batch_decoded = [
            self.decode(
                seq,
                skip_special_tokens=skip_special_tokens,
                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                output_char_offsets=output_char_offsets,
                **kwargs,
            )
            for seq in sequences
        ]
        if output_char_offsets:
            # transform a list of dicts into a dict of lists
            return Wav2Vec2PhonemeCTCTokenizerOutput({k: [d[k] for d in batch_decoded] for k in batch_decoded[0]})

        return batch_decoded

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
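As a usage illustration of the `decode`/`batch_decode` overrides above, here is a hedged sketch. It assumes a phoneme checkpoint such as `facebook/wav2vec2-lv-60-espeak-cv-ft` is reachable and that the phonemizer backend this tokenizer needs is installed; the round-trip on freshly encoded ids is a toy, so the char offsets will be trivial.

```python
# Hedged usage sketch for decode/batch_decode with output_char_offsets=True.
# Assumes the checkpoint below exists and the phonemizer backend is installed.
from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained(
    "facebook/wav2vec2-lv-60-espeak-cv-ft"  # assumed checkpoint
)

ids = tokenizer("hello world").input_ids
out = tokenizer.decode(ids, output_char_offsets=True)
print(out.text)          # the phoneme string
print(out.char_offsets)  # per-phoneme start/end offsets (in CTC frames)

# batch_decode returns a single ModelOutput (dict of lists), not a list of dicts
batch = tokenizer.batch_decode([ids, ids], output_char_offsets=True)
print(batch.text)
```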
class_definition
2,330
23,160
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py
null
4,198
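The `convert_tokens_to_string`/`_compute_offsets` pair in the Wav2Vec2PhonemeCTCTokenizer snippet above is easiest to follow on a toy input. Below is a minimal, self-contained sketch of the same CTC collapse-and-offset logic; the token values (`PAD`, `DELIM`, `toy_tokens`) are illustrative assumptions, not library constants.

```python
# Minimal sketch of CTC-style collapsing plus offset computation.
from itertools import groupby

import numpy as np

PAD = "<pad>"  # stands in for the CTC-blank token
DELIM = "|"    # stands in for the word delimiter token

toy_tokens = ["h", "h", PAD, "ɛ", "l", "l", PAD, "l", "oʊ", DELIM, "w", "ɜː"]

# 1. Collapse repeated tokens, remembering how often each one repeated.
chars, char_repetitions = zip(*((tok, len(list(g))) for tok, g in groupby(toy_tokens)))

# 2. Cumulative sums of the repetition counts give per-token frame boundaries.
end_indices = np.asarray(char_repetitions).cumsum()
start_indices = np.concatenate(([0], end_indices[:-1]))

offsets = [
    {"char": c, "start_offset": int(s), "end_offset": int(e)}
    for c, s, e in zip(chars, start_indices, end_indices)
    if c not in (PAD, DELIM)  # drop blank and delimiter, as `_compute_offsets` does
]

print(" ".join(o["char"] for o in offsets))  # h ɛ l l oʊ w ɜː
print(offsets[0])  # {'char': 'h', 'start_offset': 0, 'end_offset': 2}
```

Note how the intervening blank between the two `l` groups keeps the second `l` from being merged away; that is exactly why CTC decoding collapses repeats only within a run.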
class MaskFormerFeatureExtractor(MaskFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MaskFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MaskFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
class_definition
827
1,213
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/maskformer/feature_extraction_maskformer.py
null
4,199
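The MaskFormerFeatureExtractor row above is an instance of a common deprecation-alias pattern: the old class subclasses its replacement, warns once at construction, and otherwise behaves identically. A minimal sketch of the pattern follows, with hypothetical class names (`OldProcessor`, `NewProcessor`) standing in for the real ones.

```python
# Sketch of a deprecation alias: warn at construction, then delegate entirely.
import warnings


class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning once, then behave exactly like the new class.
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


proc = OldProcessor(size=384)  # emits a FutureWarning but remains fully functional
assert isinstance(proc, NewProcessor) and proc.size == 384
```

Because the alias subclasses the new class, existing `isinstance` checks and saved configurations keep working for the remainder of the deprecation window.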