Dataset schema (one row per extracted class definition):
- text: string, lengths 31 to 243k
- type: string, 1 distinct value ("class_definition")
- start: int64, 36 to 275k
- end: int64, 286 to 280k
- depth: int64, 0 to 1
- filepath: string, lengths 85 to 188
- parent_class: string, 3 distinct values
- class_index: int64, 0 to 10.8k
class TFXLMRobertaOutput(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size])
class_definition
24,689
26,030
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,900
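The TFXLMRobertaOutput row above projects the feed-forward expansion back to hidden_size, applies dropout, adds the residual attention output, and finishes with a post-LayerNorm. A minimal standalone sketch of that pattern, with illustrative shapes and hyperparameters (not taken from the row):

```python
import tensorflow as tf
from tensorflow import keras

# Illustrative sizes only; XLM-R base uses hidden_size=768, intermediate_size=3072.
hidden_size, intermediate_size = 768, 3072
dense = keras.layers.Dense(hidden_size)
layer_norm = keras.layers.LayerNormalization(epsilon=1e-12)
dropout = keras.layers.Dropout(0.1)

intermediate_output = tf.random.normal((2, 16, intermediate_size))  # FFN expansion output
attention_output = tf.random.normal((2, 16, hidden_size))           # residual branch

hidden = dense(intermediate_output)
hidden = dropout(hidden, training=False)
hidden = layer_norm(hidden + attention_output)  # residual add, then post-LayerNorm
print(hidden.shape)  # (2, 16, 768)
```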
class TFXLMRobertaLayer(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.attention = TFXLMRobertaAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFXLMRobertaAttention(config, name="crossattention") self.intermediate = TFXLMRobertaIntermediate(config, name="intermediate") self.bert_output = TFXLMRobertaOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, 
"bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, "crossattention", None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None)
class_definition
26,123
30,888
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,901
class TFXLMRobertaEncoder(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFXLMRobertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None)
class_definition
30,983
34,082
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,902
class TFXLMRobertaMainLayer(keras.layers.Layer): config_class = XLMRobertaConfig def __init__(self, config, add_pooling_layer=True, **kwargs): super().__init__(**kwargs) self.config = config self.is_decoder = config.is_decoder self.num_hidden_layers = config.num_hidden_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.encoder = TFXLMRobertaEncoder(config, name="encoder") self.pooler = TFXLMRobertaPooler(config, name="pooler") if add_pooling_layer else None # The embeddings must be the last declaration in order to follow the weights order self.embeddings = TFXLMRobertaEmbeddings(config, name="embeddings") # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training, ) # We create a 3D attention mask from a 2D tensor mask. 
# Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length # Copied from `modeling_tf_t5.py` # Provided a padding mask of dimensions [batch_size, mask_seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal( tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None], ) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape( extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2]) ) if past_key_values[0] is not None: # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length] extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape( attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Copied from `modeling_tf_t5.py` with -1e9 -> -10000 if self.is_decoder and encoder_attention_mask is not None: # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None)
class_definition
34,211
44,700
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,903
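The main layer above turns the 2D padding mask into an additive bias that broadcasts over heads and query positions; masked positions receive -10000.0 so the softmax effectively ignores them. A small standalone sketch of the encoder-case conversion, with illustrative values:

```python
import tensorflow as tf

# 1 means "attend", 0 means "masked"; the bias is added to the raw attention scores.
attention_mask = tf.constant([[1.0, 1.0, 1.0, 0.0]])        # (batch, seq_len)
extended_mask = tf.reshape(attention_mask, (1, 1, 1, 4))    # (batch, 1, 1, seq_len)
extended_mask = (1.0 - extended_mask) * -10000.0
print(extended_mask.numpy())  # [[[[    -0.     -0.     -0. -10000.]]]]
```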
class TFXLMRobertaPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLMRobertaConfig base_model_prefix = "roberta"
class_definition
44,815
45,084
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,904
class TFXLMRobertaModel(TFXLMRobertaPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFXLMRobertaMainLayer(config, name="roberta") @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation """ outputs = self.roberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None)
class_definition
45,378
49,253
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,905
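A usage sketch for the base model above, along the lines of the library's documented examples; it assumes network access to the public FacebookAI/xlm-roberta-base checkpoint:

```python
from transformers import AutoTokenizer, TFXLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
model = TFXLMRobertaModel.from_pretrained("FacebookAI/xlm-roberta-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)
```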
class TFXLMRobertaLMHead(keras.layers.Layer): """XLMRoberta Head for masked language modeling.""" def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.act = get_tf_activation("gelu") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, value): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.layer_norm(hidden_states) # project back to size of vocabulary with bias seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states
class_definition
49,359
51,783
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,906
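The LM head above ties its output projection to the input embedding matrix and only adds a per-token bias. A standalone sketch of that tied projection, with small illustrative sizes rather than the real XLM-R dimensions:

```python
import tensorflow as tf

vocab_size, hidden_size, seq_len = 1000, 64, 8                   # illustrative sizes
embedding_matrix = tf.random.normal((vocab_size, hidden_size))   # shared with the input embeddings
bias = tf.zeros((vocab_size,))

hidden_states = tf.random.normal((2, seq_len, hidden_size))
flat = tf.reshape(hidden_states, (-1, hidden_size))
scores = tf.matmul(flat, embedding_matrix, transpose_b=True)     # project onto the vocabulary
scores = tf.nn.bias_add(tf.reshape(scores, (-1, seq_len, vocab_size)), bias)
print(scores.shape)  # (2, 8, 1000)
```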
class TFXLMRobertaForMaskedLM(TFXLMRobertaPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") self.lm_head = TFXLMRobertaLMHead(config, self.roberta.embeddings, name="lm_head") def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", expected_output="' Paris'", expected_loss=0.1, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None)
class_definition
52,032
55,723
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,907
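A fill-mask sketch for the masked-LM model above, mirroring the expected output declared in its docstring decorator; it assumes the public base checkpoint is reachable:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFXLMRobertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
model = TFXLMRobertaForMaskedLM.from_pretrained("FacebookAI/xlm-roberta-base")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="tf")
logits = model(**inputs).logits

mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_index]))
print(tokenizer.decode([predicted_id]))  # expected: something like " Paris"
```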
class TFXLMRobertaForCausalLM(TFXLMRobertaPreTrainedModel, TFCausalLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] def __init__(self, config: XLMRobertaConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if not config.is_decoder: logger.warning("If you want to use `TFXLMRobertaLMHeadModel` as a standalone, add `is_decoder=True.`") self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") self.lm_head = TFXLMRobertaLMHead(config, input_embeddings=self.roberta.embeddings, name="lm_head") def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.lm_head.name # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = tf.ones(input_shape) # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ outputs = self.roberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.lm_head(hidden_states=sequence_output, training=training) loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels=labels, logits=shifted_logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None)
class_definition
55,999
62,629
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,908
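The causal-LM call above shifts logits and labels so that the logits at position t predict the token at position t+1. A standalone sketch of that alignment, using a stock Keras loss in place of hf_compute_loss and illustrative shapes:

```python
import tensorflow as tf

vocab_size = 11
logits = tf.random.normal((1, 5, vocab_size))   # (batch, seq_len, vocab)
labels = tf.constant([[3, 7, 2, 9, 4]])         # (batch, seq_len)

shifted_logits = logits[:, :-1]                 # drop the last step
shifted_labels = labels[:, 1:]                  # drop the first token
per_token_loss = tf.keras.losses.sparse_categorical_crossentropy(
    shifted_labels, shifted_logits, from_logits=True
)
print(per_token_loss.shape)  # (1, 4) per-token losses before reduction
```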
class TFXLMRobertaClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" ) self.config = config def call(self, features, training=False): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x, training=training) x = self.dense(x) x = self.dropout(x, training=training) x = self.out_proj(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size])
class_definition
62,747
64,304
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,909
class TFXLMRobertaForSequenceClassification(TFXLMRobertaPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") self.classifier = TFXLMRobertaClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="cardiffnlp/twitter-roberta-base-emotion", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'optimism'", expected_loss=0.08, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None)
class_definition
64,684
68,146
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,910
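A classification sketch for the model above, using the fine-tuned checkpoint named in its docstring decorator (assumed to still be available on the Hub; loading a RoBERTa checkpoint into the XLM-RoBERTa class works because the architectures match, though the library emits a model-type warning):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFXLMRobertaForSequenceClassification

checkpoint = "cardiffnlp/twitter-roberta-base-emotion"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFXLMRobertaForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("What a great day!", return_tensors="tf")
logits = model(**inputs).logits
predicted_class = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])  # e.g. 'optimism'
```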
class TFXLMRobertaForMultipleChoice(TFXLMRobertaPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"lm_head"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFXLMRobertaMainLayer(config, name="roberta") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward( XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None outputs = self.roberta( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size])
class_definition
68,527
72,758
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,911
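The multiple-choice head above flattens (batch, num_choices, seq_len) inputs into (batch * num_choices, seq_len) for the encoder and reshapes the single logit per choice back to (batch, num_choices). A standalone sketch of that reshaping with illustrative sizes:

```python
import tensorflow as tf

batch, num_choices, seq_len = 2, 4, 16
input_ids = tf.zeros((batch, num_choices, seq_len), dtype=tf.int32)

flat_input_ids = tf.reshape(input_ids, (-1, seq_len))               # (8, 16), what the encoder sees
per_choice_logits = tf.zeros((batch * num_choices, 1))              # one score per flattened choice
reshaped_logits = tf.reshape(per_choice_logits, (-1, num_choices))  # (2, 4), ready for the loss
print(flat_input_ids.shape, reshaped_logits.shape)
```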
class TFXLMRobertaForTokenClassification(TFXLMRobertaPreTrainedModel, TFTokenClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="ydshieh/roberta-large-ner-english", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", expected_loss=0.01, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size])
class_definition
73,142
76,934
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,912
class TFXLMRobertaForQuestionAnswering(TFXLMRobertaPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="ydshieh/roberta-base-squad2", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output="' puppet'", expected_loss=0.86, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size])
class_definition
77,374
81,821
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
null
5,913
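An extractive-QA sketch for the model above: take the argmax of the start and end logits and decode the span. The checkpoint name comes from the docstring decorator and is assumed to be available:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFXLMRobertaForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("ydshieh/roberta-base-squad2")
model = TFXLMRobertaForQuestionAnswering.from_pretrained("ydshieh/roberta-base-squad2")

question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))  # expected: something like ' puppet'
```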
class XLMRobertaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XLMRobertaModel`] or a [`TFXLMRobertaModel`]. It is used to instantiate a XLM-RoBERTa model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the XLMRoBERTa [FacebookAI/xlm-roberta-base](https://huggingface.co/FacebookAI/xlm-roberta-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the XLM-RoBERTa model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`XLMRobertaModel`] or [`TFXLMRobertaModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`XLMRobertaModel`] or [`TFXLMRobertaModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. 
Examples: ```python >>> from transformers import XLMRobertaConfig, XLMRobertaModel >>> # Initializing a XLM-RoBERTa FacebookAI/xlm-roberta-base style configuration >>> configuration = XLMRobertaConfig() >>> # Initializing a model (with random weights) from the FacebookAI/xlm-roberta-base style configuration >>> model = XLMRobertaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "xlm-roberta" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout
class_definition
960
6,952
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py
null
5,914
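Besides the arguments listed in its docstring, the configuration class above also receives decoder-related flags (is_decoder, add_cross_attention) through the PretrainedConfig kwargs, which the TF classes earlier in this dump consume. A short sketch of building a randomly initialized decoder-style model from it; no checkpoint download is involved:

```python
from transformers import XLMRobertaConfig, TFXLMRobertaForCausalLM

# Decoder-style configuration; weights are randomly initialized.
config = XLMRobertaConfig(is_decoder=True, use_cache=True)
model = TFXLMRobertaForCausalLM(config)
print(config.is_decoder, config.vocab_size)  # True 30522
```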
class XLMRobertaOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
class_definition
7,062
7,513
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py
null
5,915
class FlaxXLMRobertaEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states
class_definition
6,186
8,019
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,916
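The Flax embedding module above simply sums word, position, and token-type embeddings before LayerNorm and dropout. A minimal jax.numpy sketch of that sum with illustrative sizes (note that RoBERTa-style models start position ids after the padding index):

```python
import jax.numpy as jnp

hidden_size, vocab, max_pos, type_vocab = 8, 100, 16, 2   # illustrative sizes
word_emb = jnp.ones((vocab, hidden_size))
pos_emb = jnp.ones((max_pos, hidden_size))
type_emb = jnp.ones((type_vocab, hidden_size))

input_ids = jnp.array([[5, 7, 9]])
position_ids = jnp.array([[2, 3, 4]])          # positions offset past the padding index
token_type_ids = jnp.zeros_like(input_ids)

hidden = word_emb[input_ids] + pos_emb[position_ids] + type_emb[token_type_ids]
print(hidden.shape)  # (1, 3, 8)
```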
class FlaxXLMRobertaSelfAttention(nn.Module): config: XLMRobertaConfig causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` " " : {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. 
pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic=True, output_attentions: bool = False, ): # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.query(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: # self_attention key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs
class_definition
8,124
16,027
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,917
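Two shape conventions from the self-attention module above, shown on toy sizes of my own choosing: hidden states are reshaped into `(batch, seq, num_heads, head_dim)` before the attention weights are computed, and a boolean attention mask is turned into an additive bias that is very negative on masked positions.

```python
import jax.numpy as jnp

batch, seq, hidden, num_heads = 2, 4, 8, 2
head_dim = hidden // num_heads

hidden_states = jnp.ones((batch, seq, hidden))
per_head = hidden_states.reshape(batch, seq, num_heads, head_dim)  # (2, 4, 2, 4)

attention_mask = jnp.array([[1, 1, 1, 0], [1, 1, 0, 0]])  # 0 marks padding
# broadcast to (batch, 1, q_len, kv_len) and convert to an additive bias
bias = jnp.where(attention_mask[:, None, None, :] > 0, 0.0, jnp.finfo(jnp.float32).min)

print(per_head.shape, bias.shape)  # (2, 4, 2, 4) (2, 1, 1, 4)
```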
class FlaxXLMRobertaSelfOutput(nn.Module):
    config: XLMRobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class_definition
16,129
16,955
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,918
class FlaxXLMRobertaAttention(nn.Module): config: XLMRobertaConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxXLMRobertaSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxXLMRobertaSelfOutput(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs
class_definition
17,056
18,484
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,919
class FlaxXLMRobertaIntermediate(nn.Module):
    config: XLMRobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.intermediate_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.activation = ACT2FN[self.config.hidden_act]

    def __call__(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
class_definition
18,588
19,178
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,920
class FlaxXLMRobertaOutput(nn.Module):
    config: XLMRobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states, attention_output, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        hidden_states = self.LayerNorm(hidden_states + attention_output)
        return hidden_states
class_definition
19,276
20,106
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,921
class FlaxXLMRobertaLayer(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxXLMRobertaAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxXLMRobertaIntermediate(self.config, dtype=self.dtype) self.output = FlaxXLMRobertaOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxXLMRobertaAttention(self.config, causal=False, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs
class_definition
20,203
22,379
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,922
class FlaxXLMRobertaLayerCollection(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxXLMRobertaCheckpointLayer = remat(FlaxXLMRobertaLayer, static_argnums=(5, 6, 7)) self.layers = [ FlaxXLMRobertaCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] else: self.layers = [ FlaxXLMRobertaLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for " f" {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, )
class_definition
22,486
25,530
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,923
class FlaxXLMRobertaEncoder(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.layer = FlaxXLMRobertaLayerCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
class_definition
25,629
26,884
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,924
class FlaxXLMRobertaPooler(nn.Module):
    config: XLMRobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        cls_hidden_state = hidden_states[:, 0]
        cls_hidden_state = self.dense(cls_hidden_state)
        return nn.tanh(cls_hidden_state)
class_definition
26,982
27,513
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,925
class FlaxXLMRobertaLMHead(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.decoder = nn.Dense( self.config.vocab_size, dtype=self.dtype, use_bias=False, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.dense(hidden_states) hidden_states = ACT2FN["gelu"](hidden_states) hidden_states = self.layer_norm(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states
class_definition
27,623
28,952
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,926
class FlaxXLMRobertaClassificationHead(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.out_proj = nn.Dense( self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) def __call__(self, hidden_states, deterministic=True): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = nn.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states
class_definition
29,074
30,328
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,927
class FlaxXLMRobertaPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLMRobertaConfig base_model_prefix = "xlm-roberta" module_class: nn.Module = None def __init__( self, config: XLMRobertaConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.ones_like(input_ids) position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids, dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, past_key_values: dict = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxXLMRobertaAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) return outputs
class_definition
30,491
38,474
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,928
class FlaxXLMRobertaModule(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxXLMRobertaEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxXLMRobertaEncoder( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.pooler = FlaxXLMRobertaPooler(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # make sure `token_type_ids` is correctly initialized when not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # make sure `position_ids` is correctly initialized when not passed if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings( input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic ) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not return_dict: # if pooled is None, don't return it if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
class_definition
38,572
41,322
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,929
class FlaxXLMRobertaModel(FlaxXLMRobertaPreTrainedModel):
    module_class = FlaxXLMRobertaModule
class_definition
41,492
41,589
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,930
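A hedged usage sketch for the bare Flax model above. It assumes `flax` is installed and uses the public `FacebookAI/xlm-roberta-base` checkpoint; if a checkpoint publishes no Flax weights, `from_pt=True` converts the PyTorch ones.

```python
from transformers import AutoTokenizer, FlaxXLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
model = FlaxXLMRobertaModel.from_pretrained("FacebookAI/xlm-roberta-base")

inputs = tokenizer("Hello, world!", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)
```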
class FlaxXLMRobertaForMaskedLMModule(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta = FlaxXLMRobertaModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxXLMRobertaLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
41,832
43,577
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,931
class FlaxXLMRobertaForMaskedLM(FlaxXLMRobertaPreTrainedModel):
    module_class = FlaxXLMRobertaForMaskedLMModule
class_definition
43,696
43,810
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,932
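A hedged sketch of masked-token prediction with the MLM head above, under the same checkpoint assumptions as the previous example:

```python
from transformers import AutoTokenizer, FlaxXLMRobertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
model = FlaxXLMRobertaForMaskedLM.from_pretrained("FacebookAI/xlm-roberta-base")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="np")
logits = model(**inputs).logits

mask_position = int((inputs["input_ids"][0] == tokenizer.mask_token_id).argmax())
predicted_id = int(logits[0, mask_position].argmax(-1))
print(tokenizer.decode([predicted_id]))
```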
class FlaxXLMRobertaForSequenceClassificationModule(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta = FlaxXLMRobertaModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.classifier = FlaxXLMRobertaClassificationHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, deterministic=deterministic) if not return_dict: return (logits,) + outputs[1:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
44,111
45,651
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,933
class FlaxXLMRobertaForSequenceClassification(FlaxXLMRobertaPreTrainedModel):
    module_class = FlaxXLMRobertaForSequenceClassificationModule
class_definition
45,887
46,029
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,934
class FlaxXLMRobertaForMultipleChoiceModule(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta = FlaxXLMRobertaModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None # Model outputs = self.roberta( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
46,333
48,471
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,935
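The multiple-choice module above folds the choice dimension into the batch before the forward pass and unfolds the scores afterwards. A toy shape walk-through (the sizes are mine):

```python
import jax.numpy as jnp

batch, num_choices, seq = 2, 4, 8

input_ids = jnp.zeros((batch, num_choices, seq), dtype="i4")
flat_input_ids = input_ids.reshape(-1, input_ids.shape[-1])  # (8, 8): choices folded into the batch

logits = jnp.zeros((batch * num_choices, 1))                 # one score per (example, choice)
reshaped_logits = logits.reshape(-1, num_choices)            # (2, 4): back to per-example scores

print(flat_input_ids.shape, reshaped_logits.shape)
```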
class FlaxXLMRobertaForMultipleChoice(FlaxXLMRobertaPreTrainedModel):
    module_class = FlaxXLMRobertaForMultipleChoiceModule
class_definition
48,716
48,842
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,936
class FlaxXLMRobertaForTokenClassificationModule(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta = FlaxXLMRobertaModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
49,289
51,101
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,937
class FlaxXLMRobertaForTokenClassification(FlaxXLMRobertaPreTrainedModel):
    module_class = FlaxXLMRobertaForTokenClassificationModule
class_definition
51,344
51,480
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,938
class FlaxXLMRobertaForQuestionAnsweringModule(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta = FlaxXLMRobertaModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
51,781
53,510
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,939
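The question-answering module above projects every token to `num_labels=2` values and splits them into start and end logits. A toy shape sketch (the sizes are mine):

```python
import jax.numpy as jnp

batch, seq, num_labels = 2, 7, 2

logits = jnp.zeros((batch, seq, num_labels))
start_logits, end_logits = jnp.split(logits, num_labels, axis=-1)

print(start_logits.squeeze(-1).shape, end_logits.squeeze(-1).shape)  # (2, 7) (2, 7)
```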
class FlaxXLMRobertaForQuestionAnswering(FlaxXLMRobertaPreTrainedModel):
    module_class = FlaxXLMRobertaForQuestionAnsweringModule
class_definition
53,811
53,943
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,940
class FlaxXLMRobertaForCausalLMModule(nn.Module): config: XLMRobertaConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta = FlaxXLMRobertaModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxXLMRobertaLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta( input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
class_definition
54,222
56,409
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,941
class FlaxXLMRobertaForCausalLM(FlaxXLMRobertaPreTrainedModel): module_class = FlaxXLMRobertaForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. # Thus, we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs
class_definition
56,744
58,290
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
null
5,942
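A hedged sketch of greedy generation with the causal-LM wrapper above. XLM-R checkpoints are trained as encoders, so this only illustrates the API surface (`prepare_inputs_for_generation` and the pre-allocated cache) rather than producing good text; `is_decoder=True` is forwarded to the config by `from_pretrained`.

```python
from transformers import AutoTokenizer, FlaxXLMRobertaForCausalLM

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
model = FlaxXLMRobertaForCausalLM.from_pretrained("FacebookAI/xlm-roberta-base", is_decoder=True)

inputs = tokenizer("Hello", return_tensors="np")
outputs = model.generate(inputs["input_ids"], max_length=10)  # init_cache is handled internally
print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True))
```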
class CLIPSegTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an CLIPSeg model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIPSeg [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the CLIPSeg text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CLIPSegModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 77): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*, defaults to 49406): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 49407): End of stream token id. 
Example: ```python >>> from transformers import CLIPSegTextConfig, CLIPSegTextModel >>> # Initializing a CLIPSegTextConfig with CIDAS/clipseg-rd64 style configuration >>> configuration = CLIPSegTextConfig() >>> # Initializing a CLIPSegTextModel (with random weights) from the CIDAS/clipseg-rd64 style configuration >>> model = CLIPSegTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "clipseg_text_model" base_config_key = "text_config" def __init__( self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=77, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout
class_definition
784
5,378
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/configuration_clipseg.py
null
5,943
class CLIPSegVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an CLIPSeg model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIPSeg [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import CLIPSegVisionConfig, CLIPSegVisionModel >>> # Initializing a CLIPSegVisionConfig with CIDAS/clipseg-rd64 style configuration >>> configuration = CLIPSegVisionConfig() >>> # Initializing a CLIPSegVisionModel (with random weights) from the CIDAS/clipseg-rd64 style configuration >>> model = CLIPSegVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "clipseg_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act
class_definition
5,381
9,392
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/configuration_clipseg.py
null
5,944
class CLIPSegConfig(PretrainedConfig): r""" [`CLIPSegConfig`] is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate a CLIPSeg model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIPSeg [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`CLIPSegTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`CLIPSegVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original CLIPSeg implementation. extract_layers (`List[int]`, *optional*, defaults to `[3, 6, 9]`): Layers to extract when forwarding the query image through the frozen visual backbone of CLIP. reduce_dim (`int`, *optional*, defaults to 64): Dimensionality to reduce the CLIP vision embedding. decoder_num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads in the decoder of CLIPSeg. decoder_attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. decoder_hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. decoder_intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layers in the Transformer decoder. conditional_layer (`int`, *optional*, defaults to 0): The layer to use of the Transformer encoder whose activations will be combined with the condition embeddings using FiLM (Feature-wise Linear Modulation). If 0, the last layer is used. use_complex_transposed_convolution (`bool`, *optional*, defaults to `False`): Whether to use a more complex transposed convolution in the decoder, enabling more fine-grained segmentation. kwargs (*optional*): Dictionary of keyword arguments. 
Example: ```python >>> from transformers import CLIPSegConfig, CLIPSegModel >>> # Initializing a CLIPSegConfig with CIDAS/clipseg-rd64 style configuration >>> configuration = CLIPSegConfig() >>> # Initializing a CLIPSegModel (with random weights) from the CIDAS/clipseg-rd64 style configuration >>> model = CLIPSegModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a CLIPSegConfig from a CLIPSegTextConfig and a CLIPSegVisionConfig >>> # Initializing a CLIPSegText and CLIPSegVision configuration >>> config_text = CLIPSegTextConfig() >>> config_vision = CLIPSegVisionConfig() >>> config = CLIPSegConfig.from_text_vision_configs(config_text, config_vision) ```""" model_type = "clipseg" sub_configs = {"text_config": CLIPSegTextConfig, "vision_config": CLIPSegVisionConfig} def __init__( self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, extract_layers=[3, 6, 9], reduce_dim=64, decoder_num_attention_heads=4, decoder_attention_dropout=0.0, decoder_hidden_act="quick_gelu", decoder_intermediate_size=2048, conditional_layer=0, use_complex_transposed_convolution=False, **kwargs, ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). text_config_dict = kwargs.pop("text_config_dict", None) vision_config_dict = kwargs.pop("vision_config_dict", None) super().__init__(**kwargs) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: text_config = {} # This is the complete result when using `text_config_dict`. _text_config_dict = CLIPSegTextConfig(**text_config_dict).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: message = ( f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " f'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`text_config_dict` is provided which will be used to initialize `CLIPSegTextConfig`. The " f'value `text_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} # This is the complete result when using `vision_config_dict`. _vision_config_dict = CLIPSegVisionConfig(**vision_config_dict).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _vision_config_dict["id2label"] = { str(key): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. 
for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: message = ( f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different " f'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`vision_config_dict` is provided which will be used to initialize `CLIPSegVisionConfig`. " f'The value `vision_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict) if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `CLIPSegTextConfig` with default values.") if vision_config is None: vision_config = {} logger.info("`vision_config` is `None`. initializing the `CLIPSegVisionConfig` with default values.") self.text_config = CLIPSegTextConfig(**text_config) self.vision_config = CLIPSegVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.extract_layers = extract_layers self.reduce_dim = reduce_dim self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_attention_dropout = decoder_attention_dropout self.decoder_hidden_act = decoder_hidden_act self.decoder_intermediate_size = decoder_intermediate_size self.conditional_layer = conditional_layer self.initializer_factor = 1.0 self.use_complex_transposed_convolution = use_complex_transposed_convolution @classmethod def from_text_vision_configs(cls, text_config: CLIPSegTextConfig, vision_config: CLIPSegVisionConfig, **kwargs): r""" Instantiate a [`CLIPSegConfig`] (or a derived class) from clipseg text model configuration and clipseg vision model configuration. Returns: [`CLIPSegConfig`]: An instance of a configuration object """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
class_definition
9,395
19,278
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/configuration_clipseg.py
null
5,945
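Besides `from_text_vision_configs`, the decoder-specific options documented above can be set directly on the composite config. A short hedged sketch (the values are arbitrary examples, not recommended settings):

```python
from transformers import CLIPSegConfig

config = CLIPSegConfig(
    reduce_dim=112,                           # wider reduced CLIP embedding
    extract_layers=[3, 7, 11],                # which frozen vision layers feed the decoder
    use_complex_transposed_convolution=True,  # finer-grained segmentation decoder
)
print(config.reduce_dim, config.vision_config.hidden_size)
```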
class CLIPSegProcessor(ProcessorMixin): r""" Constructs a CLIPSeg processor which wraps a CLIPSeg image processor and a CLIP tokenizer into a single processor. [`CLIPSegProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`CLIPTokenizerFast`]. See the [`~CLIPSegProcessor.__call__`] and [`~CLIPSegProcessor.decode`] for more information. Args: image_processor ([`ViTImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`CLIPTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "ViTImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to ViTImageProcessor's [`~ViTImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. visual_prompt (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The visual prompt image or batch of images to be prepared. Each visual prompt image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. 
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images.") if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if visual_prompt is not None: prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if visual_prompt is not None and images is not None: encoding = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: encoding = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
class_definition
772
7,789
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/processing_clipseg.py
null
5,946
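The `__call__` logic above returns different key sets depending on which of `text`, `images` and `visual_prompt` are supplied. A minimal usage sketch, assuming network access; the checkpoint id and COCO image URL are the ones already used in the docstring examples elsewhere in this file:

```python
import requests
from PIL import Image

from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# text prompts + query images -> input_ids, attention_mask and pixel_values
inputs = processor(text=["a cat", "a remote"], images=[image] * 2, padding=True, return_tensors="pt")
print(sorted(inputs.keys()))

# visual prompt + query image -> pixel_values and conditional_pixel_values
prompt_inputs = processor(images=image, visual_prompt=image, return_tensors="pt")
print(sorted(prompt_inputs.keys()))
```

Passing both `text` and `visual_prompt` raises a `ValueError`, matching the check at the top of `__call__`.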
class CLIPSegOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`CLIPSegTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`CLIPSegVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() )
class_definition
2,146
4,023
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,947
class CLIPSegDecoderOutput(ModelOutput): """ Args: logits (`torch.FloatTensor` of shape `(batch_size, height, width)`): Classification scores for each pixel. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None
class_definition
4,037
5,188
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,948
class CLIPSegImageSegmentationOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Binary cross-entropy loss for image segmentation. ... vision_model_output (`BaseModelOutputWithPooling`): The output of the [`CLIPSegVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None conditional_embeddings: torch.FloatTensor = None pooled_output: torch.FloatTensor = None vision_model_output: BaseModelOutputWithPooling = None decoder_output: CLIPSegDecoderOutput = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["vision_model_output", "decoder_output"] else getattr(self, k).to_tuple() for k in self.keys() )
class_definition
5,202
6,061
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,949
class CLIPSegVisionEmbeddings(nn.Module): # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings.__init__ with CLIP->CLIPSeg def __init__(self, config: CLIPSegVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=True) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size}*{self.image_size})." ) patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings
class_definition
6,064
9,921
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,950
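The `interpolate_pos_encoding` method above resizes the pre-trained patch-position grid with bicubic interpolation so the vision tower can accept other resolutions. A standalone sketch of the same tensor gymnastics, using made-up sizes (a 14x14 pre-trained grid, 64-dim embeddings, and a 640x640 input with `patch_size=32`):

```python
import torch
import torch.nn.functional as F

dim, patch_size = 64, 32
old_grid = 14                      # pre-trained grid: 14 x 14 patches
new_h = new_w = 640 // patch_size  # target grid: 20 x 20 patches

pos_embed = torch.randn(1, old_grid * old_grid + 1, dim)  # [CLS] + patch positions
class_pos, patch_pos = pos_embed[:, :1], pos_embed[:, 1:]

# (1, N, dim) -> (1, dim, grid, grid), interpolate, then flatten back
patch_pos = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
patch_pos = F.interpolate(patch_pos, size=(new_h, new_w), mode="bicubic", align_corners=False)
patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, -1, dim)

resized = torch.cat((class_pos, patch_pos), dim=1)
print(resized.shape)  # torch.Size([1, 401, 64]) -> 20*20 patches + CLS
```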
class CLIPSegTextEmbeddings(nn.Module): def __init__(self, config: CLIPSegTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] max_position_embedding = self.position_embedding.weight.shape[0] if seq_length > max_position_embedding: raise ValueError( f"Sequence length must be less than max_position_embeddings (got `sequence length`: " f"{seq_length} and max_position_embeddings: {max_position_embedding}" ) if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings
class_definition
10,015
11,599
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,951
class CLIPSegAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {causal_attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped
class_definition
11,688
16,421
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,952
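The attention module above keeps the heads folded into the batch dimension and uses `torch.bmm` throughout. A toy shape walk-through of that layout (sizes are arbitrary; the `head_dim**-0.5` scaling and the output projection are omitted for brevity):

```python
import torch

bsz, seq_len, num_heads, head_dim = 2, 5, 4, 8
embed_dim = num_heads * head_dim

# queries/keys/values already reshaped to (bsz * num_heads, seq_len, head_dim)
q = torch.randn(bsz * num_heads, seq_len, head_dim)
k = torch.randn(bsz * num_heads, seq_len, head_dim)
v = torch.randn(bsz * num_heads, seq_len, head_dim)

attn_weights = torch.softmax(torch.bmm(q, k.transpose(1, 2)), dim=-1)  # (bsz*heads, seq, seq)
attn_output = torch.bmm(attn_weights, v)                               # (bsz*heads, seq, head_dim)

# fold heads back into the embedding dimension, as the forward above does
attn_output = attn_output.view(bsz, num_heads, seq_len, head_dim).transpose(1, 2)
attn_output = attn_output.reshape(bsz, seq_len, embed_dim)
print(attn_output.shape)  # torch.Size([2, 5, 32])
```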
class CLIPSegMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states
class_definition
16,504
17,077
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,953
class CLIPSegEncoderLayer(nn.Module): def __init__(self, config: CLIPSegConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = CLIPSegAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = CLIPSegMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class_definition
17,181
19,138
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,954
class CLIPSegPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = CLIPSegConfig base_model_prefix = "clip" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, CLIPSegTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, CLIPSegVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, CLIPSegAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, CLIPSegMLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, CLIPSegModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
class_definition
19,141
21,791
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,955
class CLIPSegEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`CLIPSegEncoderLayer`]. Args: config: CLIPSegConfig """ def __init__(self, config: CLIPSegConfig): super().__init__() self.config = config self.layers = nn.ModuleList([CLIPSegEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
class_definition
27,496
31,899
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,956
class CLIPSegTextTransformer(nn.Module): def __init__(self, config: CLIPSegTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPSegTextEmbeddings(config) self.encoder = CLIPSegEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) # For `pooled_output` computation self.eos_token_id = config.eos_token_id @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig) # Adapted from transformers.models.clip.modeling_clip.CLIPTextTransformer.forward with clip->clipseg, CLIP->CLIPSeg def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) # CLIPSeg's text model uses causal mask, prepare it here. # https://github.com/openai/CLIPSeg/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clipseg/model.py#L324 causal_attention_mask = _create_4d_causal_attention_mask( input_shape, hidden_states.dtype, device=hidden_states.device ) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) if self.eos_token_id == 2: # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here. # A CLIPSeg model with such `eos_token_id` in the config can't work correctly with extra new tokens added # ------------------------------------------------------------ # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 pooled_output = last_hidden_state[ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1), ] else: # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible) pooled_output = last_hidden_state[ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`) # Note: we assume each sequence (along batch dim.) contains an `eos_token_id` (e.g. 
prepared by the tokenizer) (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id) .int() .argmax(dim=-1), ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
class_definition
31,902
36,655
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,957
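The pooling branch above selects, for each sequence, the hidden state at the EOS position (via the legacy `argmax` over token ids, or via the first occurrence of `eos_token_id`). A toy sketch of the second branch, with a hypothetical `eos_token_id = 2` and random hidden states:

```python
import torch

eos_token_id = 2
input_ids = torch.tensor([[5, 7, 9, 2, 1], [6, 2, 1, 1, 1]])  # toy ids, 1 acting as padding
last_hidden_state = torch.randn(2, 5, 4)

# first position whose id equals eos_token_id, one index per sequence
eos_positions = (input_ids == eos_token_id).int().argmax(dim=-1)  # tensor([3, 1])
pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), eos_positions]
print(pooled_output.shape)  # torch.Size([2, 4])
```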
class CLIPSegTextModel(CLIPSegPreTrainedModel): config_class = CLIPSegTextConfig _no_split_modules = ["CLIPSegTextEmbeddings", "CLIPSegEncoderLayer"] def __init__(self, config: CLIPSegTextConfig): super().__init__(config) self.text_model = CLIPSegTextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, CLIPSegTextModel >>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegTextModel.from_pretrained("CIDAS/clipseg-rd64-refined") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
class_definition
36,658
38,718
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,958
class CLIPSegVisionTransformer(nn.Module): # Copied from transformers.models.altclip.modeling_altclip.AltCLIPVisionTransformer.__init__ with AltCLIP->CLIPSeg def __init__(self, config: CLIPSegVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPSegVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = CLIPSegEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor], output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = True, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
class_definition
38,721
41,126
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,959
class CLIPSegVisionModel(CLIPSegPreTrainedModel): config_class = CLIPSegVisionConfig main_input_name = "pixel_values" def __init__(self, config: CLIPSegVisionConfig): super().__init__(config) self.vision_model = CLIPSegVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = True, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, CLIPSegVisionModel >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegVisionModel.from_pretrained("CIDAS/clipseg-rd64-refined") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, )
class_definition
41,129
43,170
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,960
class CLIPSegModel(CLIPSegPreTrainedModel): config_class = CLIPSegConfig def __init__(self, config: CLIPSegConfig): super().__init__(config) if not isinstance(config.text_config, CLIPSegTextConfig): raise TypeError( "config.text_config is expected to be of type CLIPSegTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, CLIPSegVisionConfig): raise TypeError( "config.vision_config is expected to be of type CLIPSegVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = CLIPSegTextTransformer(text_config) self.vision_model = CLIPSegVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegTextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, CLIPSegModel >>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = True, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegVisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, CLIPSegModel >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CLIPSegOutput, config_class=CLIPSegConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = True, return_dict: Optional[bool] = None, ) -> Union[Tuple, CLIPSegOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, CLIPSegModel >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = clipseg_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return CLIPSegOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
class_definition
43,220
52,795
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,961
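The similarity head at the end of `CLIPSegModel.forward` is plain CLIP-style contrastive scoring: L2-normalise both embedding sets, scale by `exp(logit_scale)`, and take a matrix product. A toy sketch with random embeddings (the 2.6592 temperature is the usual CLIP-style `logit_scale_init_value`, used here purely for illustration; the real value comes from the config):

```python
import torch

text_embeds = torch.randn(2, 16)   # 2 prompts
image_embeds = torch.randn(3, 16)  # 3 images

text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)

logit_scale = torch.tensor(2.6592).exp()                          # illustrative temperature
logits_per_text = logit_scale * text_embeds @ image_embeds.t()    # (2, 3)
logits_per_image = logits_per_text.t()                            # (3, 2)
print(logits_per_text.shape, logits_per_image.shape)
```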
class CLIPSegDecoderLayer(nn.Module): """ CLIPSeg decoder layer, which is identical to `CLIPSegEncoderLayer`, except that normalization is applied after self-attention/MLP, rather than before. """ # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer.__init__ with AltCLIP->CLIPSeg def __init__(self, config: CLIPSegConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = CLIPSegAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = CLIPSegMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states hidden_states = self.layer_norm1(hidden_states) residual = hidden_states hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states hidden_states = self.layer_norm2(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class_definition
52,798
55,046
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,962
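As the docstring above notes, the decoder layer is the encoder layer with the LayerNorms moved after the residual additions (post-norm) instead of before the sub-blocks (pre-norm). A side-by-side sketch with a single linear layer standing in for the attention/MLP sub-block:

```python
import torch
import torch.nn as nn

dim = 8
norm = nn.LayerNorm(dim)
block = nn.Linear(dim, dim)  # stand-in for self-attention or the MLP
hidden = torch.randn(2, 5, dim)

pre_norm_out = hidden + block(norm(hidden))   # CLIPSegEncoderLayer ordering
post_norm_out = norm(hidden + block(hidden))  # CLIPSegDecoderLayer ordering
print(pre_norm_out.shape, post_norm_out.shape)
```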
class CLIPSegDecoder(CLIPSegPreTrainedModel): def __init__(self, config: CLIPSegConfig): super().__init__(config) self.conditional_layer = config.conditional_layer self.film_mul = nn.Linear(config.projection_dim, config.reduce_dim) self.film_add = nn.Linear(config.projection_dim, config.reduce_dim) if config.use_complex_transposed_convolution: transposed_kernels = (config.vision_config.patch_size // 4, config.vision_config.patch_size // 4) self.transposed_convolution = nn.Sequential( nn.Conv2d(config.reduce_dim, config.reduce_dim, kernel_size=3, padding=1), nn.ReLU(), nn.ConvTranspose2d( config.reduce_dim, config.reduce_dim // 2, kernel_size=transposed_kernels[0], stride=transposed_kernels[0], ), nn.ReLU(), nn.ConvTranspose2d( config.reduce_dim // 2, 1, kernel_size=transposed_kernels[1], stride=transposed_kernels[1] ), ) else: self.transposed_convolution = nn.ConvTranspose2d( config.reduce_dim, 1, config.vision_config.patch_size, stride=config.vision_config.patch_size ) depth = len(config.extract_layers) self.reduces = nn.ModuleList( [nn.Linear(config.vision_config.hidden_size, config.reduce_dim) for _ in range(depth)] ) decoder_config = copy.deepcopy(config.vision_config) decoder_config.hidden_size = config.reduce_dim decoder_config.num_attention_heads = config.decoder_num_attention_heads decoder_config.intermediate_size = config.decoder_intermediate_size decoder_config.hidden_act = "relu" self.layers = nn.ModuleList([CLIPSegDecoderLayer(decoder_config) for _ in range(len(config.extract_layers))]) def forward( self, hidden_states: Tuple[torch.Tensor], conditional_embeddings: torch.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = True, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None activations = hidden_states[::-1] output = None for i, (activation, layer, reduce) in enumerate(zip(activations, self.layers, self.reduces)): if output is not None: output = reduce(activation) + output else: output = reduce(activation) if i == self.conditional_layer: output = self.film_mul(conditional_embeddings) * output.permute(1, 0, 2) + self.film_add( conditional_embeddings ) output = output.permute(1, 0, 2) layer_outputs = layer( output, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions ) output = layer_outputs[0] if output_hidden_states: all_hidden_states += (output,) if output_attentions: all_attentions += (layer_outputs[1],) output = output[:, 1:, :].permute(0, 2, 1) # remove cls token and reshape to [batch_size, reduce_dim, seq_len] size = int(math.sqrt(output.shape[2])) batch_size = conditional_embeddings.shape[0] output = output.view(batch_size, output.shape[1], size, size) logits = self.transposed_convolution(output).squeeze(1) if not return_dict: return tuple(v for v in [logits, all_hidden_states, all_attentions] if v is not None) return CLIPSegDecoderOutput( logits=logits, hidden_states=all_hidden_states, attentions=all_attentions, )
class_definition
55,049
58,997
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,963
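At `conditional_layer`, the decoder above modulates the reduced activations with a FiLM-style transform: the conditional embedding is projected to a per-channel scale (`film_mul`) and shift (`film_add`) that broadcast over the sequence dimension. A sketch of that step in isolation, written with explicit broadcasting instead of the permute trick used in the forward above (sizes are made up):

```python
import torch
import torch.nn as nn

batch, seq_len, projection_dim, reduce_dim = 2, 10, 16, 8
film_mul = nn.Linear(projection_dim, reduce_dim)
film_add = nn.Linear(projection_dim, reduce_dim)

conditional_embeddings = torch.randn(batch, projection_dim)  # one embedding per query image
activations = torch.randn(batch, seq_len, reduce_dim)        # reduced ViT activations

scale = film_mul(conditional_embeddings)[:, None, :]  # (batch, 1, reduce_dim)
shift = film_add(conditional_embeddings)[:, None, :]
conditioned = scale * activations + shift
print(conditioned.shape)  # torch.Size([2, 10, 8])
```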
class CLIPSegForImageSegmentation(CLIPSegPreTrainedModel): config_class = CLIPSegConfig def __init__(self, config: CLIPSegConfig): super().__init__(config) self.config = config self.clip = CLIPSegModel(config) self.extract_layers = config.extract_layers self.decoder = CLIPSegDecoder(config) # Initialize weights and apply final processing self.post_init() def get_conditional_embeddings( self, batch_size: int = None, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, conditional_pixel_values: Optional[torch.Tensor] = None, ): if input_ids is not None: # compute conditional embeddings from texts if len(input_ids) != batch_size: raise ValueError("Make sure to pass as many prompt texts as there are query images") with torch.no_grad(): conditional_embeddings = self.clip.get_text_features( input_ids, attention_mask=attention_mask, position_ids=position_ids ) elif conditional_pixel_values is not None: # compute conditional embeddings from images if len(conditional_pixel_values) != batch_size: raise ValueError("Make sure to pass as many prompt images as there are query images") with torch.no_grad(): conditional_embeddings = self.clip.get_image_features(conditional_pixel_values) else: raise ValueError( "Invalid conditional, should be either provided as `input_ids` or `conditional_pixel_values`" ) return conditional_embeddings @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CLIPSegImageSegmentationOutput, config_class=CLIPSegTextConfig) def forward( self, input_ids: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, conditional_pixel_values: Optional[torch.FloatTensor] = None, conditional_embeddings: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = True, return_dict: Optional[bool] = None, ) -> Union[Tuple, CLIPSegOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Returns: Examples: ```python >>> from transformers import AutoProcessor, CLIPSegForImageSegmentation >>> from PIL import Image >>> import requests >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["a cat", "a remote", "a blanket"] >>> inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> print(logits.shape) torch.Size([3, 352, 352]) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # step 1: forward the query images through the frozen CLIP vision encoder with torch.no_grad(): vision_outputs = self.clip.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) pooled_output = self.clip.visual_projection(vision_outputs[1]) hidden_states = vision_outputs.hidden_states if return_dict else vision_outputs[2] # we add +1 here as the hidden states also include the initial embeddings activations = [hidden_states[i + 1] for i in self.extract_layers] # update vision_outputs if return_dict: vision_outputs = BaseModelOutputWithPooling( last_hidden_state=vision_outputs.last_hidden_state, pooler_output=vision_outputs.pooler_output, hidden_states=vision_outputs.hidden_states if output_hidden_states else None, attentions=vision_outputs.attentions, ) else: vision_outputs = ( vision_outputs[:2] + vision_outputs[3:] if not output_hidden_states else vision_outputs ) # step 2: compute conditional embeddings, either from text, images or an own provided embedding if conditional_embeddings is None: conditional_embeddings = self.get_conditional_embeddings( batch_size=pixel_values.shape[0], input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, conditional_pixel_values=conditional_pixel_values, ) else: if conditional_embeddings.shape[0] != pixel_values.shape[0]: raise ValueError( "Make sure to pass as many conditional embeddings as there are query images in the batch" ) if conditional_embeddings.shape[1] != self.config.projection_dim: raise ValueError( "Make sure that the feature dimension of the conditional embeddings matches" " `config.projection_dim`." ) # step 3: forward both the pooled output and the activations through the lightweight decoder to predict masks decoder_outputs = self.decoder( activations, conditional_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = decoder_outputs.logits if return_dict else decoder_outputs[0] loss = None if labels is not None: # move labels to the correct device to enable PP labels = labels.to(logits.device) loss_fn = nn.BCEWithLogitsLoss() loss = loss_fn(logits, labels) if not return_dict: output = (logits, conditional_embeddings, pooled_output, vision_outputs, decoder_outputs) return ((loss,) + output) if loss is not None else output return CLIPSegImageSegmentationOutput( loss=loss, logits=logits, conditional_embeddings=conditional_embeddings, pooled_output=pooled_output, vision_model_output=vision_outputs, decoder_output=decoder_outputs, )
class_definition
59,176
66,772
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clipseg/modeling_clipseg.py
null
5,964
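An end-to-end sketch tying the pieces of this file together: run `CLIPSegForImageSegmentation` on one image with several text prompts and turn the per-prompt logits into probability masks. It assumes network access; the sigmoid plus 0.5 threshold is one common post-processing choice, not something the model prescribes:

```python
import requests
import torch
from PIL import Image

from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["a cat", "a remote", "a blanket"]

inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

masks = torch.sigmoid(outputs.logits)  # (3, 352, 352), one mask per prompt, values in [0, 1]
binary_masks = masks > 0.5             # illustrative threshold
print(masks.shape, binary_masks.sum(dim=(1, 2)))
```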
class CodeLlamaTokenizer(PreTrainedTokenizer): """ Construct a CodeLlama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is no padding token in the original model. The default configuration matches that of [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/meta-llama/CodeLlama-7b-Instruct-hf/blob/main/tokenizer_config.json) which supports prompt infilling. Args: vocab_file (`str`): Path to the vocabulary file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> prefix_token (`str`, *optional*, defaults to `"▁<PRE>"`): Prefix token used for infilling. middle_token (`str`, *optional*, defaults to `"▁<MID>"`): Middle token used for infilling. suffix_token (`str`, *optional*, defaults to `"▁<SUF>"`): Suffix token used for infilling. eot_token (`str`, *optional*, defaults to `"▁<EOT>"`): End of text token used for infilling. fill_token (`str`, *optional*, defaults to `"<FILL_ME>"`): The token used to split the input between the prefix and suffix. suffix_first (`bool`, *optional*, defaults to `False`): Whether the input prompt and suffix should be formatted with the suffix first. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice) using the forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. add_bos_token (`bool`, *optional*, defaults to `True`): Whether to add a beginning of sequence token at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether to add an end of sequence token at the end of sequences. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the tokenization spaces. additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Llama should be used. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", prefix_token="▁<PRE>", middle_token="▁<MID>", suffix_token="▁<SUF>", eot_token="▁<EOT>", fill_token="<FILL_ME>", suffix_first=False, sp_model_kwargs: Optional[Dict[str, Any]] = None, add_bos_token=True, add_eos_token=False, clean_up_tokenization_spaces=False, additional_special_tokens=None, use_default_system_prompt=False, **kwargs, ): requires_backends(self, "protobuf") self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token self.use_default_system_prompt = use_default_system_prompt # mark tokens special to skip them additional_special_tokens = additional_special_tokens or [] for token in [prefix_token, middle_token, suffix_token, eot_token]: additional_special_tokens += [token] if token is not None else [] self.vocab_file = vocab_file self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self._prefix_token = prefix_token self._middle_token = middle_token self._suffix_token = suffix_token self._eot_token = eot_token self.fill_token = fill_token self.suffix_first = suffix_first self.sp_model = self.get_spm_processor() super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, prefix_token=prefix_token, middle_token=middle_token, suffix_token=suffix_token, eot_token=eot_token, fill_token=fill_token, sp_model_kwargs=self.sp_model_kwargs, suffix_first=suffix_first, clean_up_tokenization_spaces=clean_up_tokenization_spaces, additional_special_tokens=additional_special_tokens, use_default_system_prompt=use_default_system_prompt, **kwargs, ) @property def unk_token_length(self): return len(self.sp_model.encode(str(self.unk_token))) def get_spm_processor(self): tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs) with open(self.vocab_file, "rb") as f: sp_model = f.read() model_pb2 = import_protobuf() model = model_pb2.ModelProto.FromString(sp_model) normalizer_spec = model_pb2.NormalizerSpec() normalizer_spec.add_dummy_prefix = False model.normalizer_spec.MergeFrom(normalizer_spec) sp_model = model.SerializeToString() tokenizer.LoadFromSerializedProto(sp_model) return tokenizer @property def prefix_token(self): return self._prefix_token @property def prefix_id(self): if self._prefix_token is None: return None return self.convert_tokens_to_ids(self.prefix_token) @property def middle_token(self): return self._middle_token @property def middle_id(self): if self._middle_token is None: return None return self.convert_tokens_to_ids(self.middle_token) @property def suffix_token(self): return self._suffix_token @property def suffix_id(self): if self._suffix_token is None: return None return self.convert_tokens_to_ids(self.suffix_token) @property def eot_token(self): return self._eot_token @property def eot_id(self): if self._eot_token is None: return None return self.convert_tokens_to_ids(self.eot_token) @property def vocab_size(self): """Returns vocab size""" return self.sp_model.get_piece_size() # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_vocab 
def get_vocab(self): """Returns vocab as a dict""" vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def tokenize(self, prefix, suffix=None, suffix_first=False, **kwargs) -> List[int]: # add a prefix space to `prefix` if self.fill_token is not None and self.fill_token in prefix and suffix is None: prefix, suffix = prefix.split(self.fill_token) if len(prefix) > 0: prefix = SPIECE_UNDERLINE + prefix.replace(SPIECE_UNDERLINE, " ") if suffix is None or len(suffix) < 1: tokens = super().tokenize(prefix, **kwargs) if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens: tokens = tokens[1:] return tokens prefix_tokens = self._tokenize(prefix) # prefix has an extra `SPIECE_UNDERLINE` if None in (self.prefix_id, self.middle_id, self.suffix_id): raise ValueError( "The input either includes a `prefix` and a `suffix` used for the infilling task," f" or can be split on the {self.fill_token} token, creating a suffix and prefix," " but the model does not support `infilling`." ) suffix_tokens = self._tokenize(suffix) # make sure CodeLlama sp model does not mess up suffix_first = suffix_first if suffix_first is not None else self.suffix_first if suffix_first: # format as " <PRE> <SUF>{suf} <MID> {pre}" return [self.prefix_token, self.suffix_token] + suffix_tokens + [self.middle_token] + prefix_tokens else: # format as " <PRE> {pre} <SUF>{suf} <MID>" return [self.prefix_token] + prefix_tokens + [self.suffix_token] + suffix_tokens + [self.middle_token] def _tokenize(self, text, **kwargs): """ Returns a tokenized string. We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`. `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`. """ tokens = self.sp_model.encode(text, out_type=str) if not text.startswith((SPIECE_UNDERLINE, " ")): return tokens # 1. Encode string + prefix ex: "<unk> Hey" tokens = self.sp_model.encode(self.unk_token + text, out_type=str) # 2. 
Remove self.unk_token from ['<','unk','>', '▁Hey'] return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer._convert_token_to_id def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer._convert_id_to_token def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" # since we manually add the prefix space, we have to remove it when decoding if tokens[0].startswith(SPIECE_UNDERLINE): tokens[0] = tokens[0][1:] current_sub_tokens = [] out_string = "" for _, token in enumerate(tokens): # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.save_vocabulary def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. Returns: `Tuple(str)`: Paths to the files saved. """ if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id return ( bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id ) # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.create_token_type_ids_from_sequences def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) if token_ids_1 is not None: output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
class_definition
1,744
19,215
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/code_llama/tokenization_code_llama.py
null
5,965
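A minimal infilling sketch for the slow `CodeLlamaTokenizer` above. The checkpoint name `codellama/CodeLlama-7b-hf` is an assumption (any Code Llama SentencePiece checkpoint that defines the `▁<PRE>`/`▁<SUF>`/`▁<MID>` tokens should behave the same), and `sentencepiece` plus `protobuf` need to be installed; `tokenize` splits the prompt on `<FILL_ME>` and reassembles it in the `<PRE> {pre} <SUF>{suf} <MID>` order shown in the code:

```python
from transformers import CodeLlamaTokenizer

# Assumed checkpoint name; the sentencepiece model must contain the infilling tokens.
tokenizer = CodeLlamaTokenizer.from_pretrained("codellama/CodeLlama-7b-hf")

prompt = 'def remove_non_ascii(s: str) -> str:\n    """ <FILL_ME>\n    return result'
tokens = tokenizer.tokenize(prompt)

# The prompt is split on `<FILL_ME>` into prefix/suffix and re-assembled as
# [prefix_token, *prefix_tokens, suffix_token, *suffix_tokens, middle_token].
print(tokens[0], tokens[-1])  # expected: ▁<PRE> ▁<MID>
```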
class CodeLlamaTokenizerFast(PreTrainedTokenizerFast): """ Construct a Code Llama tokenizer. Based on byte-level Byte-Pair-Encoding. This uses notably ByteFallback and no normalization. ```python >>> from transformers import CodeLlamaTokenizerFast >>> tokenizer = CodeLlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer") >>> tokenizer.encode("Hello this is a test") [1, 15043, 445, 338, 263, 1243] ``` If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the values of the first token and final token of an encoded sequence will not be correct). For more details, check out the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. The default configuration matches that of [meta-llama/CodeLlama-7b-Instruct-hf](https://huggingface.co/meta-llama/CodeLlama-7b-Instruct-hf/blob/main/tokenizer_config.json) which supports prompt infilling. Args: vocab_file (`str`, *optional*): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that contains the vocabulary necessary to instantiate a tokenizer. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether to clean up spaces after decoding; cleanup consists of removing potential artifacts like extra spaces. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. prefix_token (`str`, *optional*, defaults to `"▁<PRE>"`): Prefix token used for infilling. middle_token (`str`, *optional*, defaults to `"▁<MID>"`): Middle token used for infilling. suffix_token (`str`, *optional*, defaults to `"▁<SUF>"`): Suffix token used for infilling. eot_token (`str`, *optional*, defaults to `"▁<EOT>"`): End of text token used for infilling. fill_token (`str`, *optional*, defaults to `"<FILL_ME>"`): The token used to split the input between the prefix and suffix. additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. add_bos_token (`bool`, *optional*, defaults to `True`): Whether to add a beginning of sequence token at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether to add an end of sequence token at the end of sequences. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Llama should be used.
""" vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = CodeLlamaTokenizer padding_side = "left" model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token="<unk>", bos_token="<s>", eos_token="</s>", prefix_token="▁<PRE>", middle_token="▁<MID>", suffix_token="▁<SUF>", eot_token="▁<EOT>", fill_token="<FILL_ME>", additional_special_tokens=None, add_bos_token=True, add_eos_token=False, use_default_system_prompt=False, **kwargs, ): # mark tokens special to skip them additional_special_tokens = additional_special_tokens or [] for token in [prefix_token, middle_token, suffix_token, eot_token]: additional_special_tokens += [token] if token is not None else [] self.use_default_system_prompt = use_default_system_prompt super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, additional_special_tokens=additional_special_tokens, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, prefix_token=prefix_token, middle_token=middle_token, suffix_token=suffix_token, eot_token=eot_token, fill_token=fill_token, use_default_system_prompt=use_default_system_prompt, **kwargs, ) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() self.vocab_file = vocab_file self._prefix_token = prefix_token self._middle_token = middle_token self._suffix_token = suffix_token self._eot_token = eot_token self.fill_token = fill_token @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor def update_post_processor(self): """ Updates the underlying post processor with the current `bos_token` and `eos_token`. 
""" bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError("add_bos_token = True but bos_token = None") eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError("add_eos_token = True but eos_token = None") single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}" pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing( single=single, pair=pair, special_tokens=special_tokens ) @property def prefix_token(self): return self._prefix_token @property def prefix_id(self): if self._prefix_token is None: return None return self.convert_tokens_to_ids(self.prefix_token) @property def middle_token(self): return self._middle_token @property def middle_id(self): if self._middle_token is None: return None return self.convert_tokens_to_ids(self.middle_token) @property def suffix_token(self): return self._suffix_token @property def suffix_id(self): if self._suffix_token is None: return None return self.convert_tokens_to_ids(self.suffix_token) @property def eot_id(self): if self._eot_token is None: return None return self.convert_tokens_to_ids(self.eot_token) @property def eot_token(self): return self._eot_token @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() def set_infilling_processor(self, reset, suffix_first=False, add_special_tokens=True): """ Updates the normalizer to make sure the prompt format for `infilling` is respected. The infilling format is the following: if suffix_first " <PRE> <SUF>{suf} <MID> {pre}" else: " <PRE> {pre} <SUF>{suf} <MID>" If `reset` is set to `True`, the `normalizer` and `post_processor` are reset to their "normal" behaviour, which is to add a prefix space for the normalizer, and add a `bos_token` to the input text for the `post_processor`. 
""" if reset: self._tokenizer.normalizer = normalizers.Sequence( [ normalizers.Prepend(prepend="▁"), normalizers.Replace(pattern=" ", content="▁"), ] ) self.update_post_processor() return self._tokenizer.normalizer = normalizers.Replace(pattern=" ", content="▁") pair = [self.bos_token] if self.add_bos_token and add_special_tokens else [] special_tokens = [(self.bos_token, self.bos_token_id)] if self.add_bos_token and add_special_tokens else [] if suffix_first: # format as " <PRE> <SUF>{suf} <MID> {pre}" pair += [self.prefix_token, self.suffix_token, "$B", self.middle_token, "$A"] special_tokens += [ (self.prefix_token, self.prefix_id), (self.suffix_token, self.suffix_id), (self.middle_token, self.middle_id), ] else: # format as " <PRE> {pre} <SUF>{suf} <MID>" pair += [self.prefix_token, "$A", self.suffix_token, "$B", self.middle_token] special_tokens += [ (self.prefix_token, self.prefix_id), (self.suffix_token, self.suffix_id), (self.middle_token, self.middle_id), ] if self.add_eos_token and add_special_tokens: pair += [self.eos_token] special_tokens += [(self.eos_token, self.eos_token_id)] self._tokenizer.post_processor = processors.TemplateProcessing( single="$A", pair=pair, special_tokens=special_tokens ) def encode_plus(self, text, text_pair=None, suffix_first=False, add_special_tokens=True, **kwargs): # hack to make sure the input is pre-process but outside rust text_pair = kwargs.pop("suffix", text_pair) if self.fill_token is not None and self.fill_token in text and text_pair is None: text, text_pair = text.split(self.fill_token) if text_pair is None or len(text_pair) < 1: return super().encode_plus(text, text_pair, add_special_tokens=add_special_tokens, **kwargs) if None in (self.prefix_id, self.middle_id, self.suffix_id): raise ValueError( "Then input includes a `prefix` and a `suffix` used for the infilling task," " the `prefix_id, middle_id, suffix_id` must all be initialized. Current" f" values : {self.prefix_id, self.middle_id, self.suffix_id}" ) self.set_infilling_processor(False, suffix_first=suffix_first, add_special_tokens=add_special_tokens) tokens = super().encode_plus(" " + text, text_pair=text_pair, add_special_tokens=True, **kwargs) self.set_infilling_processor(True) return tokens # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. 
Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.bos_token_id + token_ids_0 + self.eos_token_id return self.bos_token_id + token_ids_0 + token_ids_1 + self.eos_token_id
class_definition
1,876
16,014
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/code_llama/tokenization_code_llama_fast.py
null
5,966
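The fast tokenizer routes the same infilling format through `encode_plus` and `set_infilling_processor`, so a plain call with a `<FILL_ME>` prompt should produce the template directly. A sketch under the same assumptions as above (assumed checkpoint name; the exact ids depend on the loaded vocabulary):

```python
from transformers import CodeLlamaTokenizerFast

tokenizer = CodeLlamaTokenizerFast.from_pretrained("codellama/CodeLlama-7b-hf")  # assumed checkpoint

prompt = 'def add(a, b):\n    """<FILL_ME>"""\n    return a + b'
encoding = tokenizer(prompt)

# `encode_plus` splits on `<FILL_ME>` and installs the " <PRE> {pre} <SUF>{suf} <MID>" template,
# so with `add_bos_token=True` the sequence is expected to start with ['<s>', '▁<PRE>']
# and end with the token '▁<MID>'.
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"])[:2])
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"])[-1])
```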
class EfficientNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`EfficientNetModel`]. It is used to instantiate an EfficientNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the EfficientNet [google/efficientnet-b7](https://huggingface.co/google/efficientnet-b7) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 600): The input image size. width_coefficient (`float`, *optional*, defaults to 2.0): Scaling coefficient for network width at each stage. depth_coefficient (`float`, *optional*, defaults to 3.1): Scaling coefficient for network depth at each stage. depth_divisor (`int`, *optional*, defaults to 8): A unit of network width. kernel_sizes (`List[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`): List of kernel sizes to be used in each block. in_channels (`List[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`): List of input channel sizes to be used in each block for convolutional layers. out_channels (`List[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`): List of output channel sizes to be used in each block for convolutional layers. depthwise_padding (`List[int]`, *optional*, defaults to `[]`): List of block indices with square padding. strides (`List[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`): List of stride sizes to be used in each block for convolutional layers. num_block_repeats (`List[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`): List of the number of times each block is to be repeated. expand_ratios (`List[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`): List of scaling coefficients for each block. squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25): Squeeze expansion ratio. hidden_act (`str` or `function`, *optional*, defaults to `"swish"`): The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`, `"selu"`, `"gelu_new"`, `"silu"` and `"mish"` are supported. hidden_dim (`int`, *optional*, defaults to 2560): The hidden dimension of the layer before the classification head. pooling_type (`str` or `function`, *optional*, defaults to `"mean"`): Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`, `"max"`] initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. batch_norm_eps (`float`, *optional*, defaults to 1e-3): The epsilon used by the batch normalization layers. batch_norm_momentum (`float`, *optional*, defaults to 0.99): The momentum used by the batch normalization layers. dropout_rate (`float`, *optional*, defaults to 0.5): The dropout rate to be applied before the final classifier layer. drop_connect_rate (`float`, *optional*, defaults to 0.2): The drop rate for skip connections.
Example: ```python >>> from transformers import EfficientNetConfig, EfficientNetModel >>> # Initializing a EfficientNet efficientnet-b7 style configuration >>> configuration = EfficientNetConfig() >>> # Initializing a model (with random weights) from the efficientnet-b7 style configuration >>> model = EfficientNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "efficientnet" def __init__( self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ): super().__init__(**kwargs) self.num_channels = num_channels self.image_size = image_size self.width_coefficient = width_coefficient self.depth_coefficient = depth_coefficient self.depth_divisor = depth_divisor self.kernel_sizes = kernel_sizes self.in_channels = in_channels self.out_channels = out_channels self.depthwise_padding = depthwise_padding self.strides = strides self.num_block_repeats = num_block_repeats self.expand_ratios = expand_ratios self.squeeze_expansion_ratio = squeeze_expansion_ratio self.hidden_act = hidden_act self.hidden_dim = hidden_dim self.pooling_type = pooling_type self.initializer_range = initializer_range self.batch_norm_eps = batch_norm_eps self.batch_norm_momentum = batch_norm_momentum self.dropout_rate = dropout_rate self.drop_connect_rate = drop_connect_rate self.num_hidden_layers = sum(num_block_repeats) * 4
class_definition
947
7,187
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/configuration_efficientnet.py
null
5,967
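A small sanity check on the configuration defaults above; it only relies on attributes set in `__init__` (in particular the derived `num_hidden_layers = sum(num_block_repeats) * 4`):

```python
from transformers import EfficientNetConfig

config = EfficientNetConfig()  # b7-style defaults

# `num_hidden_layers` is derived from the per-stage repeat counts in `__init__`.
assert config.num_hidden_layers == sum(config.num_block_repeats) * 4  # (1+2+2+3+3+4+1) * 4 = 64

print(config.width_coefficient, config.depth_coefficient, config.hidden_dim)  # 2.0 3.1 2560
```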
class EfficientNetOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5
class_definition
7,190
7,595
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/configuration_efficientnet.py
null
5,968
class EfficientNetImageProcessor(BaseImageProcessor): r""" Constructs an EfficientNet image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in `preprocess`. size (`Dict[str, int]`, *optional*, defaults to `{"height": 346, "width": 346}`): Size of the image after `resize`. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.NEAREST`): Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. do_center_crop (`bool`, *optional*, defaults to `False`): Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 289, "width": 289}`): Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. rescale_offset (`bool`, *optional*, defaults to `False`): Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. Can be overridden by the `rescale_offset` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. include_top (`bool`, *optional*, defaults to `True`): Whether to scale the image a second time (zero-mean normalization with `image_std`). Should be set to `True` if the inputs are used for image classification.
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.NEAREST, do_center_crop: bool = False, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, rescale_offset: bool = False, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, include_top: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 346, "width": 346} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {"height": 289, "width": 289} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.rescale_offset = rescale_offset self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.include_top = include_top # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.NEAREST def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.NEAREST, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.NEAREST`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.NEAREST`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def rescale( self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Rescale an image by a scale factor. If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is 1/127.5, the image is rescaled between [-1, 1]. image = image * scale - 1 If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1]. image = image * scale Args: image (`np.ndarray`): Image to rescale. scale (`int` or `float`): Scale to apply to the image. offset (`bool`, *optional*): Whether to scale the image in both negative and positive directions. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ rescaled_image = rescale( image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs ) if offset: rescaled_image = rescaled_image - 1 return rescaled_image @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, rescale_offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, include_top: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after `resize`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be padded with zeros and then cropped do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`): Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. 
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. include_top (`bool`, *optional*, defaults to `self.include_top`): Rescales the image again for image classification if set to True. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - `None`: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor rescale_offset = rescale_offset if rescale_offset is not None else self.rescale_offset do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std include_top = include_top if include_top is not None else self.include_top size = size if size is not None else self.size size = get_size_dict(size) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_center_crop: images = [ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale( image=image, scale=rescale_factor, offset=rescale_offset, input_data_format=input_data_format ) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] if include_top: images = [ self.normalize(image=image, mean=0, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
class_definition
1,387
18,298
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/image_processing_efficientnet.py
null
5,969
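A worked example of the `rescale` behaviour documented above (with and without the offset); the dummy array is an assumption, and only the `rescale` method of the class shown is used:

```python
import numpy as np
from transformers import EfficientNetImageProcessor

processor = EfficientNetImageProcessor()
image = np.full((3, 8, 8), 255, dtype=np.float32)  # dummy all-white, channels-first image

# offset=True with scale=1/127.5: image * scale - 1 -> values in [-1, 1]
print(processor.rescale(image, scale=1 / 127.5, offset=True).max())   # 1.0

# offset=False with scale=1/255: image * scale -> values in [0, 1]
print(processor.rescale(image, scale=1 / 255, offset=False).max())    # 1.0
```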
class EfficientNetEmbeddings(nn.Module): r""" A module that corresponds to the stem module of the original work. """ def __init__(self, config: EfficientNetConfig): super().__init__() self.out_dim = round_filters(config, 32) self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1)) self.convolution = nn.Conv2d( config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False ) self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.activation = ACT2FN[config.hidden_act] def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: features = self.padding(pixel_values) features = self.convolution(features) features = self.batchnorm(features) features = self.activation(features) return features
class_definition
4,080
4,988
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,970
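The stem pads only the right and bottom edges before a stride-2 "valid" convolution, which reproduces TensorFlow-style "same" padding for even input sizes. A standalone sketch of just that padding arithmetic, using plain `torch.nn` with arbitrarily chosen channel counts:

```python
import torch
import torch.nn as nn

x = torch.randn(1, 3, 600, 600)

stem = nn.Sequential(
    nn.ZeroPad2d((0, 1, 0, 1)),                                             # pad right/bottom only
    nn.Conv2d(3, 64, kernel_size=3, stride=2, padding="valid", bias=False),
)

# (600 + 1 - 3) // 2 + 1 == 300 == ceil(600 / 2), i.e. TF "same" downsampling
print(stem(x).shape)  # torch.Size([1, 64, 300, 300])
```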
class EfficientNetDepthwiseConv2d(nn.Conv2d): def __init__( self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode="zeros", ): out_channels = in_channels * depth_multiplier super().__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode, )
class_definition
4,991
5,637
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,971
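`EfficientNetDepthwiseConv2d` is a regular `nn.Conv2d` with `groups=in_channels`, so each channel gets its own spatial filter. A quick parameter-count comparison against a dense convolution (sizes are arbitrary):

```python
import torch.nn as nn
from transformers.models.efficientnet.modeling_efficientnet import EfficientNetDepthwiseConv2d

depthwise = EfficientNetDepthwiseConv2d(in_channels=32, kernel_size=3, padding="same", bias=False)
dense = nn.Conv2d(32, 32, kernel_size=3, padding="same", bias=False)

num_params = lambda m: sum(p.numel() for p in m.parameters())
print(num_params(depthwise), num_params(dense))  # 288 vs 9216: one 3x3 filter per channel vs 32x32 filters
```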
class EfficientNetExpansionLayer(nn.Module): r""" This corresponds to the expansion phase of each block in the original implementation. """ def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int): super().__init__() self.expand_conv = nn.Conv2d( in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding="same", bias=False, ) self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps) self.expand_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: # Expand phase hidden_states = self.expand_conv(hidden_states) hidden_states = self.expand_bn(hidden_states) hidden_states = self.expand_act(hidden_states) return hidden_states
class_definition
5,640
6,537
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,972
class EfficientNetDepthwiseLayer(nn.Module): r""" This corresponds to the depthwise convolution phase of each block in the original implementation. """ def __init__( self, config: EfficientNetConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool, ): super().__init__() self.stride = stride conv_pad = "valid" if self.stride == 2 else "same" padding = correct_pad(kernel_size, adjust=adjust_padding) self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding) self.depthwise_conv = EfficientNetDepthwiseConv2d( in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False ) self.depthwise_norm = nn.BatchNorm2d( num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum ) self.depthwise_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: # Depthwise convolution if self.stride == 2: hidden_states = self.depthwise_conv_pad(hidden_states) hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.depthwise_norm(hidden_states) hidden_states = self.depthwise_act(hidden_states) return hidden_states
class_definition
6,540
7,897
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,973
class EfficientNetSqueezeExciteLayer(nn.Module): r""" This corresponds to the Squeeze and Excitement phase of each block in the original implementation. """ def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool = False): super().__init__() self.dim = expand_dim if expand else in_dim self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio)) self.squeeze = nn.AdaptiveAvgPool2d(output_size=1) self.reduce = nn.Conv2d( in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding="same", ) self.expand = nn.Conv2d( in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding="same", ) self.act_reduce = ACT2FN[config.hidden_act] self.act_expand = nn.Sigmoid() def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: inputs = hidden_states hidden_states = self.squeeze(hidden_states) hidden_states = self.reduce(hidden_states) hidden_states = self.act_reduce(hidden_states) hidden_states = self.expand(hidden_states) hidden_states = self.act_expand(hidden_states) hidden_states = torch.mul(inputs, hidden_states) return hidden_states
class_definition
7,900
9,275
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,974
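A shape walkthrough for the squeeze-and-excite layer: the bottleneck width is computed from `in_dim` even when the layer operates on the expanded tensor, and the output keeps the input shape (channel-wise re-weighting only). The dimensions below are arbitrary, and the class is imported from its non-public module path:

```python
import torch
from transformers import EfficientNetConfig
from transformers.models.efficientnet.modeling_efficientnet import EfficientNetSqueezeExciteLayer

config = EfficientNetConfig()
se = EfficientNetSqueezeExciteLayer(config, in_dim=32, expand_dim=192, expand=True)

x = torch.randn(1, 192, 56, 56)
print(se(x).shape)   # torch.Size([1, 192, 56, 56]) -- same shape, channels re-weighted in [0, 1]
print(se.dim_se)     # 8 == max(1, int(32 * 0.25)): bottleneck width uses `in_dim`, not `expand_dim`
```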
class EfficientNetFinalBlockLayer(nn.Module): r""" This corresponds to the final phase of each block in the original implementation. """ def __init__( self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool ): super().__init__() self.apply_dropout = stride == 1 and not id_skip self.project_conv = nn.Conv2d( in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding="same", bias=False, ) self.project_bn = nn.BatchNorm2d( num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum ) self.dropout = nn.Dropout(p=drop_rate) def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor: hidden_states = self.project_conv(hidden_states) hidden_states = self.project_bn(hidden_states) if self.apply_dropout: hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + embeddings return hidden_states
class_definition
9,278
10,430
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,975
class EfficientNetBlock(nn.Module): r""" This corresponds to the expansion and depthwise convolution phase of each block in the original implementation. Args: config ([`EfficientNetConfig`]): Model configuration class. in_dim (`int`): Number of input channels. out_dim (`int`): Number of output channels. stride (`int`): Stride size to be used in convolution layers. expand_ratio (`int`): Expand ratio to set the output dimensions for the expansion and squeeze-excite layers. kernel_size (`int`): Kernel size for the depthwise convolution layer. drop_rate (`float`): Dropout rate to be used in the final phase of each block. id_skip (`bool`): Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase of each block. Set to `True` for the first block of each stage. adjust_padding (`bool`): Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution operation, set to `True` for inputs with odd input sizes. """ def __init__( self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool, ): super().__init__() self.expand_ratio = expand_ratio self.expand = True if self.expand_ratio != 1 else False expand_in_dim = in_dim * expand_ratio if self.expand: self.expansion = EfficientNetExpansionLayer( config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride ) self.depthwise_conv = EfficientNetDepthwiseLayer( config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding, ) self.squeeze_excite = EfficientNetSqueezeExciteLayer( config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand ) self.projection = EfficientNetFinalBlockLayer( config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip, ) def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: embeddings = hidden_states # Expansion and depthwise convolution phase if self.expand_ratio != 1: hidden_states = self.expansion(hidden_states) hidden_states = self.depthwise_conv(hidden_states) # Squeeze and excite phase hidden_states = self.squeeze_excite(hidden_states) hidden_states = self.projection(embeddings, hidden_states) return hidden_states
class_definition
10,433
13,488
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,976
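A minimal forward pass through a single block, using the constructor arguments documented above (values chosen arbitrarily; with `expand_ratio=1` the expansion layer is skipped and with `id_skip=True` no residual is added):

```python
import torch
from transformers import EfficientNetConfig
from transformers.models.efficientnet.modeling_efficientnet import EfficientNetBlock

config = EfficientNetConfig()
block = EfficientNetBlock(
    config=config, in_dim=32, out_dim=16, stride=1, expand_ratio=1,
    kernel_size=3, drop_rate=0.0, id_skip=True, adjust_padding=True,
)

x = torch.randn(1, 32, 64, 64)
print(block(x).shape)  # torch.Size([1, 16, 64, 64]): depthwise conv + squeeze-excite + 1x1 projection
```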
class EfficientNetEncoder(nn.Module): r""" Forward propogates the embeddings through each EfficientNet block. Args: config ([`EfficientNetConfig`]): Model configuration class. """ def __init__(self, config: EfficientNetConfig): super().__init__() self.config = config self.depth_coefficient = config.depth_coefficient def round_repeats(repeats): # Round number of block repeats based on depth multiplier. return int(math.ceil(self.depth_coefficient * repeats)) num_base_blocks = len(config.in_channels) num_blocks = sum(round_repeats(n) for n in config.num_block_repeats) curr_block_num = 0 blocks = [] for i in range(num_base_blocks): in_dim = round_filters(config, config.in_channels[i]) out_dim = round_filters(config, config.out_channels[i]) stride = config.strides[i] kernel_size = config.kernel_sizes[i] expand_ratio = config.expand_ratios[i] for j in range(round_repeats(config.num_block_repeats[i])): id_skip = True if j == 0 else False stride = 1 if j > 0 else stride in_dim = out_dim if j > 0 else in_dim adjust_padding = False if curr_block_num in config.depthwise_padding else True drop_rate = config.drop_connect_rate * curr_block_num / num_blocks block = EfficientNetBlock( config=config, in_dim=in_dim, out_dim=out_dim, stride=stride, kernel_size=kernel_size, expand_ratio=expand_ratio, drop_rate=drop_rate, id_skip=id_skip, adjust_padding=adjust_padding, ) blocks.append(block) curr_block_num += 1 self.blocks = nn.ModuleList(blocks) self.top_conv = nn.Conv2d( in_channels=out_dim, out_channels=round_filters(config, 1280), kernel_size=1, padding="same", bias=False, ) self.top_bn = nn.BatchNorm2d( num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum ) self.top_activation = ACT2FN[config.hidden_act] def forward( self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> BaseModelOutputWithNoAttention: all_hidden_states = (hidden_states,) if output_hidden_states else None for block in self.blocks: hidden_states = block(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) hidden_states = self.top_conv(hidden_states) hidden_states = self.top_bn(hidden_states) hidden_states = self.top_activation(hidden_states) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=hidden_states, hidden_states=all_hidden_states, )
class_definition
13,491
16,807
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,977
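The encoder scales the per-stage repeat counts with `round_repeats`, i.e. `ceil(depth_coefficient * repeats)`, and ramps `drop_connect_rate` linearly over the resulting number of blocks. The arithmetic for the b7-style defaults:

```python
import math

depth_coefficient = 3.1                    # default from EfficientNetConfig
num_block_repeats = [1, 2, 2, 3, 3, 4, 1]

scaled = [math.ceil(depth_coefficient * r) for r in num_block_repeats]
print(scaled)       # [4, 7, 7, 10, 10, 13, 4]
print(sum(scaled))  # 55 blocks; block i uses drop_rate = drop_connect_rate * i / 55
```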
class EfficientNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = EfficientNetConfig base_model_prefix = "efficientnet" main_input_name = "pixel_values" _no_split_modules = [] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
16,810
17,731
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,978
class EfficientNetModel(EfficientNetPreTrainedModel): def __init__(self, config: EfficientNetConfig): super().__init__(config) self.config = config self.embeddings = EfficientNetEmbeddings(config) self.encoder = EfficientNetEncoder(config) # Final pooling layer if config.pooling_type == "mean": self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True) elif config.pooling_type == "max": self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True) else: raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}") # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: torch.FloatTensor = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Apply pooling last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) # Reshape (batch_size, 1280, 1 , 1) -> (batch_size, 1280) pooled_output = pooled_output.reshape(pooled_output.shape[:2]) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
class_definition
17,886
20,348
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,979
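A standard feature-extraction sketch with the `google/efficientnet-b7` checkpoint referenced in the configuration docstring; it downloads weights, and the image URL is only an arbitrary example:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, EfficientNetModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # arbitrary example image
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("google/efficientnet-b7")
model = EfficientNetModel.from_pretrained("google/efficientnet-b7")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch_size, hidden_dim, h, w) feature map before pooling
print(outputs.pooler_output.shape)      # (batch_size, hidden_dim) after mean pooling and reshape
```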
class EfficientNetForImageClassification(EfficientNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.efficientnet = EfficientNetModel(config) # Classifier head self.dropout = nn.Dropout(p=config.dropout_rate) self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: torch.FloatTensor = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
class_definition
20,562
23,945
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/efficientnet/modeling_efficientnet.py
null
5,980
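The classification head picks its loss from `config.problem_type`, inferring it from `num_labels` and the label dtype when unset. A stripped-down sketch of that dispatch with made-up tensors:

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

logits = torch.randn(2, 3)  # (batch_size, num_labels)

# Integer labels with num_labels > 1 -> "single_label_classification"
single = CrossEntropyLoss()(logits.view(-1, 3), torch.tensor([0, 2]).view(-1))

# Float multi-hot labels -> "multi_label_classification"
multi = BCEWithLogitsLoss()(logits, torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]))

# num_labels == 1 -> "regression"
regression = MSELoss()(torch.randn(2, 1).squeeze(), torch.tensor([0.5, -1.2]))

print(single.item(), multi.item(), regression.item())
```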
class MgpstrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`MgpstrModel`]. It is used to instantiate an MGP-STR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MGP-STR [alibaba-damo/mgp-str-base](https://huggingface.co/alibaba-damo/mgp-str-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`List[int]`, *optional*, defaults to `[32, 128]`): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. max_token_length (`int`, *optional*, defaults to 27): The max number of output tokens. num_character_labels (`int`, *optional*, defaults to 38): The number of classes for character head . num_bpe_labels (`int`, *optional*, defaults to 50257): The number of classes for bpe head . num_wordpiece_labels (`int`, *optional*, defaults to 30522): The number of classes for wordpiece head . hidden_size (`int`, *optional*, defaults to 768): The embedding dimension. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. mlp_ratio (`float`, *optional*, defaults to 4.0): The ratio of mlp hidden dim to embedding dim. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. distilled (`bool`, *optional*, defaults to `False`): Model includes a distillation token and head as in DeiT models. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. drop_rate (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder. attn_drop_rate (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.0): The stochastic depth rate. output_a3_attentions (`bool`, *optional*, defaults to `False`): Whether or not the model should returns A^3 module attentions. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import MgpstrConfig, MgpstrForSceneTextRecognition >>> # Initializing a Mgpstr mgp-str-base style configuration >>> configuration = MgpstrConfig() >>> # Initializing a model (with random weights) from the mgp-str-base style configuration >>> model = MgpstrForSceneTextRecognition(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mgp-str" def __init__( self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.max_token_length = max_token_length self.num_character_labels = num_character_labels self.num_bpe_labels = num_bpe_labels self.num_wordpiece_labels = num_wordpiece_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.distilled = distilled self.layer_norm_eps = layer_norm_eps self.drop_rate = drop_rate self.qkv_bias = qkv_bias self.attn_drop_rate = attn_drop_rate self.drop_path_rate = drop_path_rate self.output_a3_attentions = output_a3_attentions self.initializer_range = initializer_range
class_definition
784
5,780
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/configuration_mgp_str.py
null
5,981
class MgpstrTokenizer(PreTrainedTokenizer): """ Construct a MGP-STR char tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. unk_token (`str`, *optional*, defaults to `"[GO]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"[GO]"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"[s]"`): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"[GO]"`): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. """ vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs): with open(vocab_file, encoding="utf-8") as vocab_handle: self.vocab = json.load(vocab_handle) self.decoder = {v: k for k, v in self.vocab.items()} super().__init__( unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, ) @property def vocab_size(self): return len(self.vocab) def get_vocab(self): vocab = dict(self.vocab).copy() vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text): """Tokenize a string.""" char_tokens = [] for s in text: char_tokens.extend(s) return char_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,)
class_definition
884
3,775
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/tokenization_mgp_str.py
null
5,982
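`MgpstrTokenizer` is a plain character-level tokenizer backed by a JSON vocab file. A minimal sketch using a hand-made toy vocabulary so nothing needs to be downloaded (the vocabulary contents below are made up for illustration and are much smaller than the real mgp-str vocab):

```python
# Minimal sketch: character-level tokenization with MgpstrTokenizer and a toy vocab.
# The vocab below is illustrative, not the real alibaba-damo/mgp-str-base vocabulary.
import json
import tempfile
from transformers import MgpstrTokenizer

vocab = {"[GO]": 0, "[s]": 1, "t": 2, "i": 3, "c": 4, "k": 5, "e": 6}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(vocab, f)
    vocab_file = f.name

tokenizer = MgpstrTokenizer(vocab_file=vocab_file)
print(tokenizer.tokenize("ticket"))      # ['t', 'i', 'c', 'k', 'e', 't']
print(tokenizer("ticket")["input_ids"])  # [2, 3, 4, 5, 6, 2]
```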
class DecodeType(ExplicitEnum): CHARACTER = "char" BPE = "bpe" WORDPIECE = "wp"
class_definition
914
1,005
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/processing_mgp_str.py
null
5,983
class MgpstrProcessor(ProcessorMixin): r""" Constructs a MGP-STR processor which wraps an image processor and MGP-STR tokenizers into a single processor. [`MgpstrProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`MgpstrTokenizer`]. See [`~MgpstrProcessor.__call__`] and [`~MgpstrProcessor.batch_decode`] for more information. Args: image_processor (`ViTImageProcessor`, *optional*): An instance of `ViTImageProcessor`. The image processor is a required input. tokenizer ([`MgpstrTokenizer`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "char_tokenizer"] image_processor_class = "ViTImageProcessor" char_tokenizer_class = "MgpstrTokenizer" def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") self.char_tokenizer = tokenizer self.bpe_tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") self.wp_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") super().__init__(image_processor, tokenizer) def __call__(self, text=None, images=None, return_tensors=None, **kwargs): """ When used in normal mode, this method forwards all its arguments to ViTImageProcessor's [`~ViTImageProcessor.__call__`] and returns its output. This method also forwards the `text` and `kwargs` arguments to MgpstrTokenizer's [`~MgpstrTokenizer.__call__`] if `text` is not `None` to encode the text. Please refer to the docstring of the above methods for more information. """ if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") if images is not None: inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs) if text is not None: encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs) if text is None: return inputs elif images is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, sequences): """ Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (`torch.Tensor`): List of tokenized input ids. Returns: `Dict[str, any]`: Dictionary of all the outputs of the decoded results. generated_text (`List[str]`): The final results after fusion of char, bpe, and wp. scores (`List[float]`): The final scores after fusion of char, bpe, and wp. char_preds (`List[str]`): The list of character decoded sentences. bpe_preds (`List[str]`): The list of bpe decoded sentences. wp_preds (`List[str]`): The list of wp decoded sentences. This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. 
""" char_preds, bpe_preds, wp_preds = sequences batch_size = char_preds.size(0) char_strs, char_scores = self._decode_helper(char_preds, "char") bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe") wp_strs, wp_scores = self._decode_helper(wp_preds, "wp") final_strs = [] final_scores = [] for i in range(batch_size): scores = [char_scores[i], bpe_scores[i], wp_scores[i]] strs = [char_strs[i], bpe_strs[i], wp_strs[i]] max_score_index = scores.index(max(scores)) final_strs.append(strs[max_score_index]) final_scores.append(scores[max_score_index]) out = {} out["generated_text"] = final_strs out["scores"] = final_scores out["char_preds"] = char_strs out["bpe_preds"] = bpe_strs out["wp_preds"] = wp_strs return out def _decode_helper(self, pred_logits, format): """ Convert a list of lists of bpe token ids into a list of strings by calling bpe tokenizer. Args: pred_logits (`torch.Tensor`): List of model prediction logits. format (`Union[DecoderType, str]`): Type of model prediction. Must be one of ['char', 'bpe', 'wp']. Returns: `tuple`: dec_strs(`str`): The decode strings of model prediction. conf_scores(`List[float]`): The confidence score of model prediction. """ if format == DecodeType.CHARACTER: decoder = self.char_decode eos_token = 1 eos_str = "[s]" elif format == DecodeType.BPE: decoder = self.bpe_decode eos_token = 2 eos_str = "#" elif format == DecodeType.WORDPIECE: decoder = self.wp_decode eos_token = 102 eos_str = "[SEP]" else: raise ValueError(f"Format {format} is not supported.") dec_strs, conf_scores = [], [] batch_size = pred_logits.size(0) batch_max_length = pred_logits.size(1) _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True) preds_index = preds_index.view(-1, batch_max_length)[:, 1:] preds_str = decoder(preds_index) preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2) preds_max_prob = preds_max_prob[:, 1:] for index in range(batch_size): pred_eos = preds_str[index].find(eos_str) pred = preds_str[index][:pred_eos] pred_index = preds_index[index].cpu().tolist() pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1 pred_max_prob = preds_max_prob[index][: pred_eos_index + 1] confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(pred) conf_scores.append(confidence_score) return dec_strs, conf_scores def char_decode(self, sequences): """ Convert a list of lists of char token ids into a list of strings by calling char tokenizer. Args: sequences (`torch.Tensor`): List of tokenized input ids. Returns: `List[str]`: The list of char decoded sentences. """ decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)] return decode_strs def bpe_decode(self, sequences): """ Convert a list of lists of bpe token ids into a list of strings by calling bpe tokenizer. Args: sequences (`torch.Tensor`): List of tokenized input ids. Returns: `List[str]`: The list of bpe decoded sentences. """ return self.bpe_tokenizer.batch_decode(sequences) def wp_decode(self, sequences): """ Convert a list of lists of word piece token ids into a list of strings by calling word piece tokenizer. Args: sequences (`torch.Tensor`): List of tokenized input ids. Returns: `List[str]`: The list of wp decoded sentences. """ decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)] return decode_strs
class_definition
1,102
9,297
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/processing_mgp_str.py
null
5,984
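`MgpstrProcessor` fuses three decoding heads (character, GPT-2 BPE, BERT wordpiece) and keeps, per image, the string whose head produced the highest cumulative-probability score. A minimal end-to-end sketch, assuming network access and the `alibaba-damo/mgp-str-base` checkpoint referenced in this file:

```python
# Minimal sketch: preparing inputs and decoding MGP-STR predictions with MgpstrProcessor.
import requests
import torch
from PIL import Image
from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"  # IIIT-5k sample reused from the model docstring
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    logits = model(pixel_values).logits  # tuple: (char_logits, bpe_logits, wp_logits)

decoded = processor.batch_decode(logits)
print(decoded["generated_text"])  # best string per image after char/bpe/wp fusion
print(decoded["scores"])          # confidence score of the selected head
```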
class MgpstrDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob)
class_definition
2,613
3,093
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,985
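`MgpstrDropPath` delegates to a module-level `drop_path` helper that is not part of this record. The sketch below is a generic reference implementation of per-sample stochastic depth (a Bernoulli keep mask rescaled by the keep probability), not the exact helper from `modeling_mgp_str.py`:

```python
# Minimal sketch: per-sample stochastic depth ("drop path"), the operation MgpstrDropPath wraps.
import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1.0 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dimensions
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize to 0/1
    return hidden_states.div(keep_prob) * random_tensor

x = torch.ones(4, 3, 8)
print(drop_path(x, drop_prob=0.5, training=True)[:, 0, 0])  # roughly half the samples are zeroed out
```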
class MgpstrModelOutput(ModelOutput): """ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. Args: logits (`tuple(torch.FloatTensor)` of shape `(batch_size, config.num_character_labels)`): Tuple of `torch.FloatTensor` (one for the output of character of shape `(batch_size, config.max_token_length, config.num_character_labels)`, + one for the output of bpe of shape `(batch_size, config.max_token_length, config.num_bpe_labels)`, + one for the output of wordpiece of shape `(batch_size, config.max_token_length, config.num_wordpiece_labels)`) . Classification scores (before SoftMax) of character, bpe and wordpiece. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, config.max_token_length, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. a3_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_a3_attentions=True` is passed or when `config.output_a3_attentions=True`): Tuple of `torch.FloatTensor` (one for the attention of character, + one for the attention of bpe`, + one for the attention of wordpiece) of shape `(batch_size, config.max_token_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: Tuple[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None a3_attentions: Optional[Tuple[torch.FloatTensor]] = None
class_definition
3,107
5,572
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,986
class MgpstrEmbeddings(nn.Module): """2D Image to Patch Embedding""" def __init__(self, config: MgpstrConfig): super().__init__() image_size = ( config.image_size if isinstance(config.image_size, collections.abc.Iterable) else (config.image_size, config.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) self.image_size = image_size self.patch_size = patch_size self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.num_tokens = 2 if config.distilled else 1 self.proj = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size) self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + self.num_tokens, config.hidden_size)) self.pos_drop = nn.Dropout(p=config.drop_rate) def forward(self, pixel_values): batch_size, channel, height, width = pixel_values.shape if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) patch_embeddings = self.proj(pixel_values) patch_embeddings = patch_embeddings.flatten(2).transpose(1, 2) # BCHW -> BNC cls_tokens = self.cls_token.expand(batch_size, -1, -1) embedding_output = torch.cat((cls_tokens, patch_embeddings), dim=1) embedding_output = embedding_output + self.pos_embed embedding_output = self.pos_drop(embedding_output) return embedding_output
class_definition
5,575
7,513
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,987
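With the default configuration (32x128 images, 4x4 patches, `distilled=False`), `MgpstrEmbeddings` yields 8 * 32 = 256 patch tokens plus one class token. A quick shape check with randomly initialized weights, so no checkpoint is required:

```python
# Minimal sketch: token count produced by MgpstrEmbeddings under the default config.
import torch
from transformers import MgpstrConfig
from transformers.models.mgp_str.modeling_mgp_str import MgpstrEmbeddings

config = MgpstrConfig()                    # image_size=[32, 128], patch_size=4, hidden_size=768
embeddings = MgpstrEmbeddings(config)

pixel_values = torch.randn(2, 3, 32, 128)  # (batch, channels, height, width)
output = embeddings(pixel_values)
print(embeddings.grid_size)                # (8, 32)
print(output.shape)                        # torch.Size([2, 257, 768]) -> 256 patches + 1 cls token
```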
class MgpstrMlp(nn.Module): """MLP as used in Vision Transformer, MLP-Mixer and related networks""" def __init__(self, config: MgpstrConfig, hidden_features): super().__init__() hidden_features = hidden_features or config.hidden_size self.fc1 = nn.Linear(config.hidden_size, hidden_features) self.act = nn.GELU() self.fc2 = nn.Linear(hidden_features, config.hidden_size) self.drop = nn.Dropout(config.drop_rate) def forward(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.drop(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = self.drop(hidden_states) return hidden_states
class_definition
7,516
8,294
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,988
class MgpstrAttention(nn.Module): def __init__(self, config: MgpstrConfig): super().__init__() self.num_heads = config.num_attention_heads head_dim = config.hidden_size // config.num_attention_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) self.attn_drop = nn.Dropout(config.attn_drop_rate) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.proj_drop = nn.Dropout(config.drop_rate) def forward(self, hidden_states): batch_size, num, channel = hidden_states.shape qkv = ( self.qkv(hidden_states) .reshape(batch_size, num, 3, self.num_heads, channel // self.num_heads) .permute(2, 0, 3, 1, 4) ) query, key, value = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) attention_probs = (query @ key.transpose(-2, -1)) * self.scale attention_probs = attention_probs.softmax(dim=-1) attention_probs = self.attn_drop(attention_probs) context_layer = (attention_probs @ value).transpose(1, 2).reshape(batch_size, num, channel) context_layer = self.proj(context_layer) context_layer = self.proj_drop(context_layer) return (context_layer, attention_probs)
class_definition
8,297
9,660
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,989
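`MgpstrAttention` computes queries, keys and values with a single fused `qkv` projection and returns both the context vectors and the per-head attention maps. A shape walkthrough with random weights:

```python
# Minimal sketch: input/output shapes of MgpstrAttention for a 257-token sequence.
import torch
from transformers import MgpstrConfig
from transformers.models.mgp_str.modeling_mgp_str import MgpstrAttention

config = MgpstrConfig()                    # hidden_size=768, num_attention_heads=12
attention = MgpstrAttention(config)

hidden_states = torch.randn(2, 257, config.hidden_size)
context, attention_probs = attention(hidden_states)
print(context.shape)          # torch.Size([2, 257, 768])  -- same shape as the input
print(attention_probs.shape)  # torch.Size([2, 12, 257, 257])  -- one attention map per head
```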
class MgpstrLayer(nn.Module): def __init__(self, config: MgpstrConfig, drop_path=None): super().__init__() self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attn = MgpstrAttention(config) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = MgpstrDropPath(drop_path) if drop_path is not None else nn.Identity() self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) mlp_hidden_dim = int(config.hidden_size * config.mlp_ratio) self.mlp = MgpstrMlp(config, mlp_hidden_dim) def forward(self, hidden_states): self_attention_outputs = self.attn(self.norm1(hidden_states)) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1] # first residual connection hidden_states = self.drop_path(attention_output) + hidden_states # second residual connection is done here layer_output = hidden_states + self.drop_path(self.mlp(self.norm2(hidden_states))) outputs = (layer_output, outputs) return outputs
class_definition
9,663
10,824
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,990
class MgpstrEncoder(nn.Module): def __init__(self, config: MgpstrConfig): super().__init__() # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)] self.blocks = nn.Sequential( *[MgpstrLayer(config=config, drop_path=dpr[i]) for i in range(config.num_hidden_layers)] ) def forward(self, hidden_states, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for _, blk in enumerate(self.blocks): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = blk(hidden_states) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, )
class_definition
10,827
12,241
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,991
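The encoder draws its per-layer drop-path rates from a linear ramp between 0 and `drop_path_rate` (the "stochastic depth decay rule" in the comment). A small check of the rates that rule produces:

```python
# Minimal sketch: the stochastic depth decay rule used by MgpstrEncoder.
import torch
from transformers import MgpstrConfig

config = MgpstrConfig(drop_path_rate=0.1, num_hidden_layers=12)
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
print([round(rate, 3) for rate in dpr])
# [0.0, 0.009, 0.018, ..., 0.1] -- the first layer never drops, the last drops at the full rate
```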
class MgpstrA3Module(nn.Module): def __init__(self, config: MgpstrConfig): super().__init__() self.token_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.tokenLearner = nn.Sequential( nn.Conv2d(config.hidden_size, config.hidden_size, kernel_size=(1, 1), stride=1, groups=8, bias=False), nn.Conv2d(config.hidden_size, config.max_token_length, kernel_size=(1, 1), stride=1, bias=False), ) self.feat = nn.Conv2d( config.hidden_size, config.hidden_size, kernel_size=(1, 1), stride=1, groups=8, bias=False ) self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.token_norm(hidden_states) hidden_states = hidden_states.transpose(1, 2).unsqueeze(-1) selected = self.tokenLearner(hidden_states) selected = selected.flatten(2) attentions = F.softmax(selected, dim=-1) feat = self.feat(hidden_states) feat = feat.flatten(2).transpose(1, 2) feat = torch.einsum("...si,...id->...sd", attentions, feat) a3_out = self.norm(feat) return (a3_out, attentions)
class_definition
12,244
13,465
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,992
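The A^3 module turns the 257 encoder tokens into exactly `max_token_length` output slots by learning one attention distribution per output character position. A shape walkthrough with random weights:

```python
# Minimal sketch: MgpstrA3Module maps (batch, seq_len, hidden) to (batch, max_token_length, hidden).
import torch
from transformers import MgpstrConfig
from transformers.models.mgp_str.modeling_mgp_str import MgpstrA3Module

config = MgpstrConfig()                    # max_token_length=27, hidden_size=768
a3_module = MgpstrA3Module(config)

sequence_output = torch.randn(2, 257, config.hidden_size)
a3_out, attentions = a3_module(sequence_output)
print(a3_out.shape)      # torch.Size([2, 27, 768])  -- one vector per output character slot
print(attentions.shape)  # torch.Size([2, 27, 257])  -- attention over the 257 encoder tokens
```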
class MgpstrPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MgpstrConfig base_model_prefix = "mgp_str" _no_split_modules = [] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, MgpstrEmbeddings): nn.init.trunc_normal_(module.pos_embed, mean=0.0, std=self.config.initializer_range) nn.init.trunc_normal_(module.cls_token, mean=0.0, std=self.config.initializer_range) elif isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
13,468
14,505
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,993
class MgpstrModel(MgpstrPreTrainedModel): def __init__(self, config: MgpstrConfig): super().__init__(config) self.config = config self.embeddings = MgpstrEmbeddings(config) self.encoder = MgpstrEncoder(config) def get_input_embeddings(self) -> nn.Module: return self.embeddings.proj @add_start_docstrings_to_model_forward(MGP_STR_INPUTS_DOCSTRING) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return encoder_outputs return BaseModelOutput( last_hidden_state=encoder_outputs.last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
class_definition
16,101
17,796
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,994
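The base `MgpstrModel` is simply the patch embeddings followed by the transformer encoder. A forward-pass sketch with random weights:

```python
# Minimal sketch: a forward pass through a randomly initialized MgpstrModel.
import torch
from transformers import MgpstrConfig, MgpstrModel

model = MgpstrModel(MgpstrConfig())
pixel_values = torch.randn(1, 3, 32, 128)

with torch.no_grad():
    outputs = model(pixel_values, output_hidden_states=True)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 257, 768])
print(len(outputs.hidden_states))       # 13: the embedding output plus one entry per encoder layer
```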
class MgpstrForSceneTextRecognition(MgpstrPreTrainedModel): config_class = MgpstrConfig main_input_name = "pixel_values" def __init__(self, config: MgpstrConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.mgp_str = MgpstrModel(config) self.char_a3_module = MgpstrA3Module(config) self.bpe_a3_module = MgpstrA3Module(config) self.wp_a3_module = MgpstrA3Module(config) self.char_head = nn.Linear(config.hidden_size, config.num_character_labels) self.bpe_head = nn.Linear(config.hidden_size, config.num_bpe_labels) self.wp_head = nn.Linear(config.hidden_size, config.num_wordpiece_labels) @add_start_docstrings_to_model_forward(MGP_STR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MgpstrModelOutput, config_class=MgpstrConfig) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_a3_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], MgpstrModelOutput]: r""" output_a3_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of a3 modules. See `a3_attentions` under returned tensors for more detail. Returns: Example: ```python >>> from transformers import ( ... MgpstrProcessor, ... MgpstrForSceneTextRecognition, ... ) >>> import requests >>> from PIL import Image >>> # load image from the IIIT-5k dataset >>> url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base") >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values >>> model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base") >>> # inference >>> outputs = model(pixel_values) >>> out_strs = processor.batch_decode(outputs.logits) >>> out_strs["generated_text"] '["ticket"]' ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict mgp_outputs = self.mgp_str( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = mgp_outputs[0] char_a3_out, char_attention = self.char_a3_module(sequence_output) bpe_a3_out, bpe_attention = self.bpe_a3_module(sequence_output) wp_a3_out, wp_attention = self.wp_a3_module(sequence_output) char_logits = self.char_head(char_a3_out) bpe_logits = self.bpe_head(bpe_a3_out) wp_logits = self.wp_head(wp_a3_out) all_a3_attentions = (char_attention, bpe_attention, wp_attention) if output_a3_attentions else None all_logits = (char_logits, bpe_logits, wp_logits) if not return_dict: outputs = (all_logits, all_a3_attentions) + mgp_outputs[1:] return tuple(output for output in outputs if output is not None) return MgpstrModelOutput( logits=all_logits, hidden_states=mgp_outputs.hidden_states, attentions=mgp_outputs.attentions, a3_attentions=all_a3_attentions, )
class_definition
18,062
21,923
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mgp_str/modeling_mgp_str.py
null
5,995
class MobileBertTokenizer(PreTrainedTokenizer): r""" Construct a MobileBERT tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original MobileBERT). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. """ vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs, ): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained" " model use `tokenizer = MobileBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize( text, never_split=self.all_special_tokens if not split_special_tokens else None ): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A MobileBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. 
already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,)
class_definition
1,784
12,477
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilebert/tokenization_mobilebert.py
null
5,996
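A short sketch of the special-token layout and token type ids `MobileBertTokenizer` builds for a sentence pair, assuming the `google/mobilebert-uncased` checkpoint is available:

```python
# Minimal sketch: MobileBERT special tokens and token type ids for a sentence pair.
# Assumes the "google/mobilebert-uncased" checkpoint is available.
from transformers import MobileBertTokenizer

tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")

encoded = tokenizer("How are you?", "I am fine.")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# ['[CLS]', 'how', 'are', 'you', '?', '[SEP]', 'i', 'am', 'fine', '.', '[SEP]']
print(encoded["token_type_ids"])
# [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]  -- 0 for the first segment, 1 for the second
```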
class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output)
class_definition
12,552
19,300
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilebert/tokenization_mobilebert.py
null
5,997
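`BasicTokenizer` can be exercised on its own to see the lowercasing, accent stripping and punctuation splitting it performs before WordPiece runs; whitespace-delimited tokens listed in `never_split` pass through untouched:

```python
# Minimal sketch: BasicTokenizer behaviour in isolation.
from transformers.models.mobilebert.tokenization_mobilebert import BasicTokenizer

tokenizer = BasicTokenizer(do_lower_case=True)
print(tokenizer.tokenize("Héllo, how are U?"))
# ['hello', ',', 'how', 'are', 'u', '?']  -- lowercased, accents stripped, punctuation split off

print(tokenizer.tokenize("Héllo [UNK] world", never_split=["[UNK]"]))
# ['hello', '[UNK]', 'world']  -- never_split tokens keep their casing and punctuation
```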
class WordpieceTokenizer: """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
class_definition
19,379
21,267
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilebert/tokenization_mobilebert.py
null
5,998
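The greedy longest-match-first algorithm described in the docstring can be run with a tiny vocabulary; the vocab below is made up for illustration:

```python
# Minimal sketch: greedy longest-match-first WordPiece tokenization with a toy vocabulary.
from transformers.models.mobilebert.tokenization_mobilebert import WordpieceTokenizer

vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(tokenizer.tokenize("unaffable"))     # ['un', '##aff', '##able']
print(tokenizer.tokenize("unsplittable"))  # ['[UNK]'] -- no full segmentation exists in this vocab
```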
class TFMobileBertPreTrainingLoss: """ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining NSP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0]) # make sure only labels that are not equal to -100 # are taken into account for the loss computation lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype) masked_lm_losses = unmasked_lm_losses * lm_loss_mask reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1]) ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype) masked_ns_loss = unmasked_ns_loss * ns_loss_mask reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask) return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,))
class_definition
2,765
4,383
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilebert/modeling_tf_mobilebert.py
null
5,999
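The pretraining loss ignores every position labelled -100 by masking the per-token losses before averaging. The sketch below reproduces that masked-mean computation for the MLM term with plain TensorFlow ops on dummy data; it mirrors the class rather than calling it:

```python
# Minimal sketch: the "-100 means ignore" masking used in TFMobileBertPreTrainingLoss (MLM term only).
import tensorflow as tf

batch_size, seq_len, vocab_size = 2, 4, 7
mlm_labels = tf.constant([[3, -100, 5, -100], [-100, 2, -100, -100]])
mlm_logits = tf.random.normal((batch_size, seq_len, vocab_size))

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
# clip the -100 labels to 0 so the loss op does not error; the mask drops those positions afterwards
per_token_loss = loss_fn(tf.nn.relu(mlm_labels), mlm_logits)

mask = tf.cast(mlm_labels != -100, per_token_loss.dtype)
masked_lm_loss = tf.reduce_sum(per_token_loss * mask) / tf.reduce_sum(mask)
print(masked_lm_loss)  # mean cross-entropy over the three non-ignored positions only
```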