Dataset columns:
- text: string (lengths 31 – 243k)
- type: string (1 value)
- start: int64 (36 – 275k)
- end: int64 (286 – 280k)
- depth: int64 (0 – 1)
- filepath: string (lengths 85 – 188)
- parent_class: string (3 values)
- class_index: int64 (0 – 10.8k)
class TFSwinPatchMerging(keras.layers.Layer):
    """
    Patch Merging Layer.

    Args:
        input_resolution (`Tuple[int]`):
            Resolution of input feature.
        dim (`int`):
            Number of input channels.
        norm_layer (`keras.layer.Layer`, *optional*, defaults to `keras.layers.LayerNormalization`):
            Normalization layer class.
    """

    def __init__(
        self, input_resolution: Tuple[int, int], dim: int, norm_layer: Optional[Callable] = None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = keras.layers.Dense(2 * dim, use_bias=False, name="reduction")
        if norm_layer is None:
            # Use same default epsilon as PyTorch
            self.norm = keras.layers.LayerNormalization(epsilon=1e-5, name="norm")
        else:
            self.norm = norm_layer(name="norm")

    def maybe_pad(self, input_feature: tf.Tensor, height: int, width: int) -> tf.Tensor:
        should_pad = (height % 2 == 1) or (width % 2 == 1)
        if should_pad:
            pad_values = ((0, 0), (0, height % 2), (0, width % 2), (0, 0))
            input_feature = tf.pad(input_feature, pad_values)
        return input_feature

    def call(self, input_feature: tf.Tensor, input_dimensions: Tuple[int, int], training: bool = False) -> tf.Tensor:
        height, width = input_dimensions
        # `dim` is height * width
        batch_size, _, num_channels = shape_list(input_feature)

        input_feature = tf.reshape(input_feature, (batch_size, height, width, num_channels))
        # pad input to be divisible by width and height, if needed
        input_feature = self.maybe_pad(input_feature, height, width)
        # [batch_size, height/2, width/2, num_channels]
        input_feature_0 = input_feature[:, 0::2, 0::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_1 = input_feature[:, 1::2, 0::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_2 = input_feature[:, 0::2, 1::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_3 = input_feature[:, 1::2, 1::2, :]
        # [batch_size, height/2, width/2, 4*num_channels]
        input_feature = tf.concat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
        input_feature = tf.reshape(
            input_feature, (batch_size, -1, 4 * num_channels)
        )  # [batch_size, height/2 * width/2, 4*C]
        input_feature = self.norm(input_feature, training=training)
        input_feature = self.reduction(input_feature, training=training)

        return input_feature

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "reduction", None) is not None:
            with tf.name_scope(self.reduction.name):
                self.reduction.build([None, None, 4 * self.dim])
        if getattr(self, "norm", None) is not None:
            with tf.name_scope(self.norm.name):
                self.norm.build([None, None, 4 * self.dim])
type: class_definition | start: 18,107 | end: 21,246 | depth: 0 | parent_class: null | class_index: 3,600
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
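A minimal shape check for TFSwinPatchMerging, as a hedged usage sketch: it assumes `tensorflow` is installed and that the class is importable from `transformers.models.swin.modeling_tf_swin`. Each 2x2 patch neighborhood is concatenated (4x channels) and projected down to 2x channels, halving the spatial grid.

import tensorflow as tf
from transformers.models.swin.modeling_tf_swin import TFSwinPatchMerging

# Hypothetical sizes: a 56x56 feature map with 96 channels, flattened to (batch, H*W, C).
merge = TFSwinPatchMerging(input_resolution=(56, 56), dim=96)
features = tf.random.normal((1, 56 * 56, 96))

merged = merge(features, input_dimensions=(56, 56))
print(merged.shape)  # (1, 784, 192): 28*28 tokens, channels doubled from 96 to 192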
class TFSwinDropPath(keras.layers.Layer):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None, scale_by_keep: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def call(self, input: tf.Tensor, training: bool = False) -> tf.Tensor:
        return drop_path(input, self.drop_prob, training, self.scale_by_keep)
type: class_definition | start: 21,249 | end: 21,772 | depth: 0 | parent_class: null | class_index: 3,601
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
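For reference, a self-contained sketch of the usual stochastic-depth rule that the module-level `drop_path` helper (not shown in this excerpt) is expected to implement — the function name and details here are illustrative, not the library's own: at train time each sample's residual branch is zeroed with probability `drop_prob` and the survivors are rescaled by `1 / keep_prob`; at inference it is the identity.

import tensorflow as tf

def drop_path_sketch(x: tf.Tensor, drop_prob: float, training: bool, scale_by_keep: bool = True) -> tf.Tensor:
    # Illustrative re-implementation of stochastic depth; the module's own
    # `drop_path` helper may differ in details.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining axes.
    shape = [tf.shape(x)[0]] + [1] * (len(x.shape) - 1)
    mask = tf.floor(keep_prob + tf.random.uniform(shape, dtype=x.dtype))
    if scale_by_keep:
        mask = mask / keep_prob
    return x * mask

x = tf.ones((4, 3))
print(drop_path_sketch(x, drop_prob=0.5, training=True))   # some rows zeroed, others scaled to 2.0
print(drop_path_sketch(x, drop_prob=0.5, training=False))  # identity at inference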
class TFSwinSelfAttention(keras.layers.Layer):
    def __init__(self, config: SwinConfig, dim: int, num_heads: int, **kwargs) -> None:
        super().__init__(**kwargs)
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        window_size = config.window_size
        self.window_size = (
            window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
        )

        self.query = keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            use_bias=config.qkv_bias,
            name="query",
        )
        self.key = keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            use_bias=config.qkv_bias,
            name="key",
        )
        self.value = keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            use_bias=config.qkv_bias,
            name="value",
        )

        self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)

    def build(self, input_shape: tf.TensorShape) -> None:
        self.relative_position_bias_table = self.add_weight(
            shape=(((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1)), self.num_attention_heads),
            initializer="zeros",
            name="relative_position_bias_table",
        )
        self.relative_position_index = self.add_weight(
            shape=(self.window_size[0] ** 2, self.window_size[1] ** 2),
            trainable=False,
            dtype=tf.int32,
            name="relative_position_index",
        )

        # get pair-wise relative position index for each token inside the window
        coords_h = tf.range(self.window_size[0])
        coords_w = tf.range(self.window_size[1])
        coords = tf.stack(tf.meshgrid(coords_h, coords_w, indexing="ij"))
        coords_flatten = tf.reshape(coords, (shape_list(coords)[0], -1))
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = tf.transpose(relative_coords, (1, 2, 0))

        stack_0, stack_1 = tf.unstack(relative_coords, axis=2)
        stack_0 += self.window_size[0] - 1
        stack_0 *= 2 * self.window_size[1] - 1
        stack_1 += self.window_size[1] - 1
        relative_coords = tf.stack([stack_0, stack_1], axis=2)

        self.relative_position_index.assign(tf.cast(tf.reduce_sum(relative_coords, axis=-1), tf.int32))

        if self.built:
            return
        self.built = True
        if getattr(self, "query", None) is not None:
            with tf.name_scope(self.query.name):
                self.query.build([None, None, self.all_head_size])
        if getattr(self, "key", None) is not None:
            with tf.name_scope(self.key.name):
                self.key.build([None, None, self.all_head_size])
        if getattr(self, "value", None) is not None:
            with tf.name_scope(self.value.name):
                self.value.build([None, None, self.all_head_size])

    def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor:
        new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size]
        x = tf.reshape(x, new_x_shape)
        return tf.transpose(x, (0, 2, 1, 3))

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor, ...]:
        batch_size, dim, _ = shape_list(hidden_states)
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, (0, 1, 3, 2)))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        relative_position_bias = tf.gather(
            self.relative_position_bias_table, tf.reshape(self.relative_position_index, (-1,))
        )
        relative_position_bias = tf.reshape(
            relative_position_bias,
            (self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1),
        )
        relative_position_bias = tf.transpose(relative_position_bias, (2, 0, 1))
        attention_scores = attention_scores + tf.expand_dims(relative_position_bias, 0)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in SwinModel call() function)
            mask_shape = shape_list(attention_mask)[0]
            attention_scores = tf.reshape(
                attention_scores, (batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim)
            )
            attention_mask = tf.expand_dims(attention_mask, 1)
            attention_mask = tf.expand_dims(attention_mask, 0)
            attention_scores = attention_scores + attention_mask
            attention_scores = tf.reshape(attention_scores, (-1, self.num_attention_heads, dim, dim))

        # Normalize the attention scores to probabilities.
        attention_probs = tf.nn.softmax(attention_scores, axis=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs, training=training)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(context_layer, (0, 2, 1, 3))
        new_context_layer_shape = shape_list(context_layer)[:-2] + [
            self.all_head_size,
        ]
        context_layer = tf.reshape(context_layer, new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
type: class_definition | start: 21,775 | end: 28,402 | depth: 0 | parent_class: null | class_index: 3,602
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
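The relative-position bookkeeping in `build` above is easiest to see on a tiny window. The following standalone sketch repeats the same arithmetic for an assumed 2x2 window; every (query, key) pair inside the window maps to one of (2*2-1)*(2*2-1) = 9 learned bias-table rows.

import tensorflow as tf

window_size = (2, 2)  # tiny window so the index table is easy to read

coords_h = tf.range(window_size[0])
coords_w = tf.range(window_size[1])
coords = tf.stack(tf.meshgrid(coords_h, coords_w, indexing="ij"))          # (2, Wh, Ww)
coords_flatten = tf.reshape(coords, (2, -1))                               # (2, Wh*Ww)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # (2, N, N)
relative_coords = tf.transpose(relative_coords, (1, 2, 0))                 # (N, N, 2)

# Shift both axes to be non-negative, then flatten the 2-D offset into one index,
# mirroring what TFSwinSelfAttention.build does.
dh, dw = tf.unstack(relative_coords, axis=2)
dh = (dh + window_size[0] - 1) * (2 * window_size[1] - 1)
dw = dw + window_size[1] - 1
relative_position_index = dh + dw

print(relative_position_index.numpy())
# Each of the 4x4 (query, key) pairs picks one of 9 bias-table rows.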
class TFSwinSelfOutput(keras.layers.Layer):
    def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(dim, name="dense")
        self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob, name="dropout")
        self.dim = dim

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.dim])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
type: class_definition | start: 28,405 | end: 29,407 | depth: 0 | parent_class: null | class_index: 3,603
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class TFSwinAttention(keras.layers.Layer):
    def __init__(self, config: SwinConfig, dim: int, num_heads: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.self = TFSwinSelfAttention(config, dim, num_heads, name="self")
        self.self_output = TFSwinSelfOutput(config, dim, name="output")
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """
        Prunes heads of the model. See base class PreTrainedModel.

        heads: dict of {layer_num: list of heads to prune in this layer}
        """
        raise NotImplementedError

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> tf.Tensor:
        self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions, training=training)
        attention_output = self.self_output(self_outputs[0], hidden_states, training=training)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self", None) is not None:
            with tf.name_scope(self.self.name):
                self.self.build(None)
        if getattr(self, "self_output", None) is not None:
            with tf.name_scope(self.self_output.name):
                self.self_output.build(None)
type: class_definition | start: 29,410 | end: 30,968 | depth: 0 | parent_class: null | class_index: 3,604
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class TFSwinIntermediate(keras.layers.Layer):
    def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(int(config.mlp_ratio * dim), name="dense")
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.dim = dim

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.dim])
type: class_definition | start: 30,971 | end: 31,882 | depth: 0 | parent_class: null | class_index: 3,605
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class TFSwinOutput(keras.layers.Layer):
    def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(dim, name="dense")
        # Pass the layer name as a keyword: the second positional argument of
        # keras.layers.Dropout is `noise_shape`, not `name`.
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
        self.config = config
        self.dim = dim

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, int(self.config.mlp_ratio * self.dim)])
type: class_definition | start: 31,885 | end: 32,755 | depth: 0 | parent_class: null | class_index: 3,606
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class TFSwinLayer(keras.layers.Layer):
    def __init__(
        self,
        config,
        dim,
        input_resolution: Tuple[int, int],
        num_heads: int,
        drop_path_rate: float = 0.0,
        shift_size: int = 0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        min_res = tf.reduce_min(input_resolution)
        self.window_size = min_res if min_res <= config.window_size else config.window_size
        self.shift_size = 0 if min_res <= self.window_size else shift_size
        self.input_resolution = input_resolution

        self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before")
        self.attention = TFSwinAttention(config, dim, num_heads, name="attention")
        self.drop_path = (
            TFSwinDropPath(drop_path_rate, name="drop_path")
            if drop_path_rate > 0.0
            else keras.layers.Activation("linear", name="drop_path")
        )
        self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after")
        self.intermediate = TFSwinIntermediate(config, dim, name="intermediate")
        self.swin_output = TFSwinOutput(config, dim, name="output")
        self.dim = dim

    def get_attn_mask(self, height: int, width: int, window_size: int, shift_size: int) -> tf.Tensor | None:
        img_mask = tf.zeros((height, width))
        height_slices = ((0, -window_size), (-window_size, -shift_size), (-shift_size, -1))
        width_slices = ((0, -window_size), (-window_size, -shift_size), (-shift_size, -1))

        # calculate attention mask for SW-MSA
        if shift_size > 0:
            count = 0
            for height_slice in height_slices:
                for width_slice in width_slices:
                    height_inds = tf.range(height_slice[0] % height, height_slice[1] % height + 1)
                    width_inds = tf.range(width_slice[0] % width, width_slice[1] % width + 1)
                    indices = tf.reshape(tf.stack(tf.meshgrid(height_inds, width_inds), axis=-1), (-1, 2))
                    if len(indices) >= 1:
                        updates = tf.ones((len(indices),), dtype=img_mask.dtype) * count
                        img_mask = tf.tensor_scatter_nd_update(img_mask, indices, updates)
                    count += 1

        img_mask = tf.expand_dims(img_mask, -1)
        img_mask = tf.expand_dims(img_mask, 0)

        mask_windows = window_partition(img_mask, window_size)
        mask_windows = tf.reshape(mask_windows, (-1, window_size * window_size))
        attn_mask = tf.expand_dims(mask_windows, 1) - tf.expand_dims(mask_windows, 2)
        attn_mask = tf.where(attn_mask != 0, float(-100.0), attn_mask)
        attn_mask = tf.where(attn_mask == 0, float(0.0), attn_mask)
        return attn_mask

    def maybe_pad(
        self, hidden_states: tf.Tensor, window_size: int, height: int, width: int
    ) -> Tuple[tf.Tensor, tf.Tensor]:
        pad_right = (window_size - width % window_size) % window_size
        pad_bottom = (window_size - height % window_size) % window_size
        pad_values = [[0, 0], [0, pad_bottom], [0, pad_right], [0, 0]]
        hidden_states = tf.pad(hidden_states, pad_values)
        pad_values = tf.reshape(pad_values, (-1,))
        return hidden_states, pad_values

    def call(
        self,
        hidden_states: tf.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: tf.Tensor | None = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> tf.Tensor:
        # if window size is larger than input resolution, we don't partition windows
        min_res = tf.reduce_min(input_dimensions)
        shift_size = 0 if min_res <= self.window_size else self.shift_size
        window_size = min_res if min_res <= self.window_size else self.window_size

        height, width = input_dimensions
        batch_size, _, channels = shape_list(hidden_states)
        shortcut = hidden_states

        hidden_states = self.layernorm_before(hidden_states, training=training)
        hidden_states = tf.reshape(hidden_states, (batch_size, height, width, channels))
        # pad hidden_states to multiples of window size
        hidden_states, pad_values = self.maybe_pad(hidden_states, window_size, height, width)

        _, height_pad, width_pad, _ = shape_list(hidden_states)
        # cyclic shift
        if shift_size > 0:
            shifted_hidden_states = tf.roll(hidden_states, shift=(-shift_size, -shift_size), axis=(1, 2))
        else:
            shifted_hidden_states = hidden_states

        # partition windows
        hidden_states_windows = window_partition(shifted_hidden_states, window_size)
        hidden_states_windows = tf.reshape(hidden_states_windows, (-1, window_size * window_size, channels))
        attn_mask = self.get_attn_mask(
            height=height_pad, width=width_pad, window_size=window_size, shift_size=shift_size
        )

        attention_outputs = self.attention(
            hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions, training=training
        )

        attention_output = attention_outputs[0]

        attention_windows = tf.reshape(attention_output, (-1, window_size, window_size, channels))
        shifted_windows = window_reverse(attention_windows, window_size, height_pad, width_pad)

        # reverse cyclic shift
        if shift_size > 0:
            attention_windows = tf.roll(shifted_windows, shift=(shift_size, shift_size), axis=(1, 2))
        else:
            attention_windows = shifted_windows

        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_windows = attention_windows[:, :height, :width, :]

        attention_windows = tf.reshape(attention_windows, (batch_size, height * width, channels))

        hidden_states = shortcut + self.drop_path(attention_windows, training=training)

        layer_output = self.layernorm_after(hidden_states, training=training)
        layer_output = self.intermediate(layer_output)
        layer_output = hidden_states + self.swin_output(layer_output, training=training)

        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layernorm_before", None) is not None:
            with tf.name_scope(self.layernorm_before.name):
                self.layernorm_before.build([None, None, self.dim])
        if getattr(self, "attention", None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, "drop_path", None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)
        if getattr(self, "layernorm_after", None) is not None:
            with tf.name_scope(self.layernorm_after.name):
                self.layernorm_after.build([None, None, self.dim])
        if getattr(self, "intermediate", None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, "swin_output", None) is not None:
            with tf.name_scope(self.swin_output.name):
                self.swin_output.build(None)
type: class_definition | start: 32,758 | end: 40,264 | depth: 0 | parent_class: null | class_index: 3,607
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
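`TFSwinLayer.call` relies on the module helpers `window_partition` and `window_reverse`, which are not part of this excerpt. A self-contained sketch of the partitioning reshape (an illustrative re-implementation under that assumption, not the module's own helper):

import tensorflow as tf

def window_partition_sketch(x: tf.Tensor, window_size: int) -> tf.Tensor:
    # (B, H, W, C) -> (num_windows * B, window_size, window_size, C)
    batch, height, width, channels = x.shape
    x = tf.reshape(x, (batch, height // window_size, window_size, width // window_size, window_size, channels))
    x = tf.transpose(x, (0, 1, 3, 2, 4, 5))
    return tf.reshape(x, (-1, window_size, window_size, channels))

x = tf.random.normal((1, 8, 8, 3))
windows = window_partition_sketch(x, window_size=4)
print(windows.shape)  # (4, 4, 4, 3): four 4x4 windows from one 8x8 map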
class TFSwinStage(keras.layers.Layer):
    def __init__(
        self,
        config: SwinConfig,
        dim: int,
        input_resolution: Tuple[int, int],
        depth: int,
        num_heads: int,
        drop_path: List[float],
        downsample: Optional[Callable],
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.dim = dim
        self.blocks = [
            TFSwinLayer(
                config=config,
                dim=dim,
                input_resolution=input_resolution,
                num_heads=num_heads,
                shift_size=0 if (i % 2 == 0) else config.window_size // 2,
                drop_path_rate=drop_path[i],
                name=f"blocks.{i}",
            )
            for i in range(depth)
        ]

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(
                input_resolution,
                dim=dim,
                norm_layer=partial(keras.layers.LayerNormalization, epsilon=1e-5),
                name="downsample",
            )
        else:
            self.downsample = None

        self.pointing = False

    def call(
        self,
        hidden_states: tf.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor, ...]:
        height, width = input_dimensions
        for i, layer_module in enumerate(self.blocks):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            layer_outputs = layer_module(
                hidden_states, input_dimensions, layer_head_mask, output_attentions, training=training
            )

            hidden_states = layer_outputs[0]

        if self.downsample is not None:
            height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
            output_dimensions = (height, width, height_downsampled, width_downsampled)
            hidden_states = self.downsample(layer_outputs[0], input_dimensions, training=training)
        else:
            output_dimensions = (height, width, height, width)

        stage_outputs = (hidden_states, output_dimensions)

        if output_attentions:
            stage_outputs += layer_outputs[1:]
        return stage_outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "downsample", None) is not None:
            with tf.name_scope(self.downsample.name):
                self.downsample.build(None)
        if getattr(self, "blocks", None) is not None:
            for layer in self.blocks:
                with tf.name_scope(layer.name):
                    layer.build(None)
type: class_definition | start: 40,267 | end: 43,094 | depth: 0 | parent_class: null | class_index: 3,608
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class TFSwinEncoder(keras.layers.Layer):
    def __init__(self, config: SwinConfig, grid_size: Tuple[int, int], **kwargs):
        super().__init__(**kwargs)
        self.num_layers = len(config.depths)
        self.config = config
        dpr = list((tf.linspace(0, 1, sum(config.depths)) * config.drop_path_rate).numpy())
        self.layers = [
            TFSwinStage(
                config=config,
                dim=int(config.embed_dim * 2**i_layer),
                input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
                depth=config.depths[i_layer],
                num_heads=config.num_heads[i_layer],
                drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                downsample=TFSwinPatchMerging if (i_layer < self.num_layers - 1) else None,
                name=f"layers.{i_layer}",
            )
            for i_layer in range(self.num_layers)
        ]

        self.gradient_checkpointing = False

    def call(
        self,
        hidden_states: tf.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: tf.Tensor | None = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor, ...], TFSwinEncoderOutput]:
        all_input_dimensions = ()
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if output_hidden_states:
            batch_size, _, hidden_size = shape_list(hidden_states)
            # rearrange b (h w) c -> b c h w
            reshaped_hidden_state = tf.reshape(hidden_states, (batch_size, *input_dimensions, hidden_size))
            reshaped_hidden_state = tf.transpose(reshaped_hidden_state, (0, 3, 1, 2))
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            layer_outputs = layer_module(
                hidden_states, input_dimensions, layer_head_mask, output_attentions, training=training
            )

            hidden_states = layer_outputs[0]
            output_dimensions = layer_outputs[1]

            input_dimensions = (output_dimensions[-2], output_dimensions[-1])
            all_input_dimensions += (input_dimensions,)

            if output_hidden_states:
                batch_size, _, hidden_size = shape_list(hidden_states)
                # rearrange b (h w) c -> b c h w
                reshaped_hidden_state = tf.reshape(hidden_states, (batch_size, *input_dimensions, hidden_size))
                reshaped_hidden_state = tf.transpose(reshaped_hidden_state, (0, 3, 1, 2))
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[2:]

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return TFSwinEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
type: class_definition | start: 43,097 | end: 46,925 | depth: 0 | parent_class: null | class_index: 3,609
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
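The `dpr` line in `TFSwinEncoder.__init__` draws one stochastic-depth rate per block from a linear ramp and slices it per stage. A small standalone sketch with hypothetical Swin-Tiny-style settings (depths `(2, 2, 6, 2)`, `drop_path_rate=0.1`):

import tensorflow as tf

depths = (2, 2, 6, 2)   # hypothetical Swin-Tiny-style stage depths
drop_path_rate = 0.1

# Same ramp as TFSwinEncoder.__init__: 0 at the first block, drop_path_rate at the last.
dpr = list((tf.linspace(0.0, 1.0, sum(depths)) * drop_path_rate).numpy())
for i in range(len(depths)):
    stage_rates = dpr[sum(depths[:i]) : sum(depths[: i + 1])]
    print(f"stage {i}: {[round(float(r), 3) for r in stage_rates]}")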
class TFSwinPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SwinConfig
    base_model_prefix = "swin"
    main_input_name = "pixel_values"
type: class_definition | start: 46,928 | end: 47,219 | depth: 0 | parent_class: null | class_index: 3,610
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class AdaptiveAveragePooling1D(keras.layers.Layer):
    """
    Average 1D Pooling with adaptive kernel size.

    Args:
        output_size: An integer or tuple/list of a single integer, specifying pooled_features.
            The new size of output channels.
        data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in
            the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, channels)` while
            `channels_first` corresponds to inputs with shape `(batch, channels, steps)`.

    Input shape:
        - If `data_format='channels_last'`: 3D tensor with shape `(batch, steps, channels)`.
        - If `data_format='channels_first'`: 3D tensor with shape `(batch, channels, steps)`.

    Output shape:
        - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, pooled_steps, channels)`.
        - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, channels, pooled_steps)`.

    Adapted from [tensorflow-addons' adaptive pooling.py](
        https://github.com/tensorflow/addons/blob/8cec33fcaaf1cf90aec7bdd55a0fcdbb251ce5c2/tensorflow_addons/layers/adaptive_pooling.py#L90-L120
    )
    """

    def __init__(
        self,
        output_size: Union[int, Iterable[int]],
        reduce_function: Callable = tf.reduce_mean,
        data_format: Optional[str] = None,
        **kwargs,
    ) -> None:
        self.data_format = normalize_data_format(data_format)
        self.reduce_function = reduce_function
        self.output_size = (output_size,) if isinstance(output_size, int) else tuple(output_size)
        super().__init__(**kwargs)

    def call(self, inputs: tf.Tensor, *args) -> tf.Tensor:
        bins = self.output_size[0]
        if self.data_format == "channels_last":
            splits = tf.split(inputs, bins, axis=1)
            splits = tf.stack(splits, axis=1)
            out_vect = self.reduce_function(splits, axis=2)
        else:
            splits = tf.split(inputs, bins, axis=2)
            splits = tf.stack(splits, axis=2)
            out_vect = self.reduce_function(splits, axis=3)
        return out_vect

    def compute_output_shape(self, input_shape: Iterable[int]) -> tf.TensorShape:
        input_shape = tf.TensorShape(input_shape).as_list()
        if self.data_format == "channels_last":
            shape = tf.TensorShape([input_shape[0], self.output_size[0], input_shape[2]])
        else:
            shape = tf.TensorShape([input_shape[0], input_shape[1], self.output_size[0]])
        return shape

    def get_config(self) -> Dict[str, Any]:
        config = {
            "output_size": self.output_size,
            "data_format": self.data_format,
        }
        base_config = super().get_config()
        return {**base_config, **config}
type: class_definition | start: 49,540 | end: 52,347 | depth: 0 | parent_class: null | class_index: 3,611
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
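A hedged usage sketch for `AdaptiveAveragePooling1D`, assuming it is importable from `transformers.models.swin.modeling_tf_swin`. Note that because this implementation relies on `tf.split`, the number of steps must be divisible by `output_size`, unlike `torch.nn.AdaptiveAvgPool1d` which handles arbitrary sizes.

import tensorflow as tf
from transformers.models.swin.modeling_tf_swin import AdaptiveAveragePooling1D

pool = AdaptiveAveragePooling1D(output_size=1)
x = tf.reshape(tf.range(12, dtype=tf.float32), (1, 6, 2))  # (batch, steps, channels)
print(pool(x))  # shape (1, 1, 2): the mean over all 6 steps per channel -> [[[5., 6.]]]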
class TFSwinMainLayer(keras.layers.Layer):
    config_class = SwinConfig

    def __init__(
        self, config: SwinConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.num_layers = len(config.depths)
        self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))

        self.embeddings = TFSwinEmbeddings(config, use_mask_token=use_mask_token, name="embeddings")
        self.encoder = TFSwinEncoder(config, self.embeddings.patch_grid, name="encoder")

        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
        self.pooler = AdaptiveAveragePooling1D(output_size=(1,)) if add_pooling_layer else None

    def get_input_embeddings(self) -> TFSwinPatchEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List]):
        """
        Prunes heads of the model. See base class PreTrainedModel.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_head_mask(self, head_mask: Optional[Any]) -> List:
        if head_mask is not None:
            raise NotImplementedError
        return [None] * len(self.config.depths)

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor | None = None,
        bool_masked_pos: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSwinModelOutput, Tuple[tf.Tensor, ...]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask)
        embedding_output, input_dimensions = self.embeddings(
            pixel_values, bool_masked_pos=bool_masked_pos, training=training
        )

        encoder_outputs = self.encoder(
            embedding_output,
            input_dimensions,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output, training=training)

        pooled_output = None
        if self.pooler is not None:
            batch_size, _, num_features = shape_list(sequence_output)
            pooled_output = self.pooler(sequence_output)
            pooled_output = tf.reshape(pooled_output, (batch_size, num_features))

        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]
            return output

        return TFSwinModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.num_features])
type: class_definition | start: 52,370 | end: 56,989 | depth: 0 | parent_class: null | class_index: 3,612
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class TFSwinModel(TFSwinPreTrainedModel):
    def __init__(
        self, config: SwinConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs
    ) -> None:
        super().__init__(config, **kwargs)
        self.config = config
        self.swin = TFSwinMainLayer(config, name="swin")

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSwinModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor | None = None,
        bool_masked_pos: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSwinModelOutput, Tuple[tf.Tensor, ...]]:
        r"""
        bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        swin_outputs = self.swin(
            pixel_values=pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return swin_outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "swin", None) is not None:
            with tf.name_scope(self.swin.name):
                self.swin.build(None)
type: class_definition | start: 57,145 | end: 59,454 | depth: 0 | parent_class: null | class_index: 3,613
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
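End-to-end usage of `TFSwinModel` follows the standard transformers pattern; a hedged sketch using the `microsoft/swin-tiny-patch4-window7-224` checkpoint that already appears in the masked-image-modeling example further below (the expected shapes assume that 224x224 tiny configuration):

import tensorflow as tf
from transformers import AutoImageProcessor, TFSwinModel
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
model = TFSwinModel.from_pretrained("microsoft/swin-tiny-patch4-window7-224")

inputs = image_processor(images=image, return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, 49, 768): 7x7 final grid, 96 * 2**3 channels
print(outputs.pooler_output.shape)      # (1, 768)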
class TFSwinPixelShuffle(keras.layers.Layer):
    """TF layer implementation of torch.nn.PixelShuffle"""

    def __init__(self, upscale_factor: int, **kwargs) -> None:
        super().__init__(**kwargs)
        if not isinstance(upscale_factor, int) or upscale_factor < 2:
            raise ValueError(f"upscale_factor must be an integer value >= 2 got {upscale_factor}")
        self.upscale_factor = upscale_factor

    def call(self, x: tf.Tensor) -> tf.Tensor:
        hidden_states = x
        batch_size, _, _, num_input_channels = shape_list(hidden_states)
        block_size_squared = self.upscale_factor**2
        output_depth = int(num_input_channels / block_size_squared)
        # When the number of output channels >= 2, PyTorch's PixelShuffle and
        # TF's depth_to_space differ in their output as the order of channels selected for combining
        # is a permutation of the other c.f.
        # https://stackoverflow.com/questions/68272502/tf-depth-to-space-not-same-as-torchs-pixelshuffle-when-output-channels-1
        permutation = tf.constant(
            [[i + j * block_size_squared for i in range(block_size_squared) for j in range(output_depth)]]
        )
        hidden_states = tf.gather(params=hidden_states, indices=tf.tile(permutation, [batch_size, 1]), batch_dims=-1)
        hidden_states = tf.nn.depth_to_space(hidden_states, block_size=self.upscale_factor, data_format="NHWC")
        return hidden_states
type: class_definition | start: 59,457 | end: 60,904 | depth: 0 | parent_class: null | class_index: 3,614
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
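A quick shape check for `TFSwinPixelShuffle` as a hedged sketch (it assumes the class is importable from `transformers.models.swin.modeling_tf_swin`): with `upscale_factor=2`, 8 input channels become 2 output channels while each spatial dimension doubles, matching `torch.nn.PixelShuffle` semantics in NHWC layout.

import tensorflow as tf
from transformers.models.swin.modeling_tf_swin import TFSwinPixelShuffle

shuffle = TFSwinPixelShuffle(upscale_factor=2)
x = tf.random.normal((1, 3, 3, 8))  # NHWC with 8 = 2 * (2**2) input channels

y = shuffle(x)
print(y.shape)  # (1, 6, 6, 2): spatial dims x2, channels / upscale_factor**2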
class TFSwinDecoder(keras.layers.Layer):
    def __init__(self, config: SwinConfig, **kwargs):
        super().__init__(**kwargs)
        self.conv2d = keras.layers.Conv2D(
            filters=config.encoder_stride**2 * config.num_channels, kernel_size=1, strides=1, name="0"
        )
        self.pixel_shuffle = TFSwinPixelShuffle(config.encoder_stride, name="1")
        self.config = config

    def call(self, x: tf.Tensor) -> tf.Tensor:
        hidden_states = x
        # B,C,H,W -> B,H,W,C
        hidden_states = tf.transpose(hidden_states, (0, 2, 3, 1))
        hidden_states = self.conv2d(hidden_states)
        hidden_states = self.pixel_shuffle(hidden_states)
        # B,H,W,C -> B,C,H,W
        hidden_states = tf.transpose(hidden_states, (0, 3, 1, 2))
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "conv2d", None) is not None:
            with tf.name_scope(self.conv2d.name):
                self.conv2d.build([None, None, None, self.config.hidden_size])
        if getattr(self, "pixel_shuffle", None) is not None:
            with tf.name_scope(self.pixel_shuffle.name):
                self.pixel_shuffle.build(None)
type: class_definition | start: 60,907 | end: 62,160 | depth: 0 | parent_class: null | class_index: 3,615
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class TFSwinForMaskedImageModeling(TFSwinPreTrainedModel):
    def __init__(self, config: SwinConfig):
        super().__init__(config)

        self.swin = TFSwinMainLayer(config, add_pooling_layer=False, use_mask_token=True, name="swin")

        self.decoder = TFSwinDecoder(config, name="decoder")

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSwinMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor | None = None,
        bool_masked_pos: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[Tuple, TFSwinMaskedImageModelingOutput]:
        r"""
        bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, TFSwinForMaskedImageModeling
        >>> import tensorflow as tf
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
        >>> model = TFSwinForMaskedImageModeling.from_pretrained("microsoft/swin-tiny-patch4-window7-224")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = tf.random.uniform((1, num_patches)) >= 0.5

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 224, 224]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.swin(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = outputs[0]
        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = tf.transpose(sequence_output, (0, 2, 1))
        batch_size, num_channels, sequence_length = shape_list(sequence_output)
        height = width = int(sequence_length**0.5)
        sequence_output = tf.reshape(sequence_output, (batch_size, num_channels, height, width))

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = tf.reshape(bool_masked_pos, (-1, size, size))
            mask = tf.repeat(bool_masked_pos, self.config.patch_size, 1)
            mask = tf.repeat(mask, self.config.patch_size, 2)
            mask = tf.expand_dims(mask, 1)
            mask = tf.cast(mask, tf.float32)

            reconstruction_loss = keras.losses.mean_absolute_error(
                # Swap axes as metric calculation reduces over the final dimension
                tf.transpose(pixel_values, (1, 2, 3, 0)),
                tf.transpose(reconstructed_pixel_values, (1, 2, 3, 0)),
            )
            reconstruction_loss = tf.expand_dims(reconstruction_loss, 0)
            total_loss = tf.reduce_sum(reconstruction_loss * mask)
            num_masked_pixels = (tf.reduce_sum(mask) + 1e-5) * self.config.num_channels
            masked_im_loss = total_loss / num_masked_pixels
            masked_im_loss = tf.reshape(masked_im_loss, (1,))

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[2:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return TFSwinMaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "swin", None) is not None:
            with tf.name_scope(self.swin.name):
                self.swin.build(None)
        if getattr(self, "decoder", None) is not None:
            with tf.name_scope(self.decoder.name):
                self.decoder.build(None)
type: class_definition | start: 62,347 | end: 67,481 | depth: 0 | parent_class: null | class_index: 3,616
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
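The masked loss above upsamples the patch-level `bool_masked_pos` to pixel resolution with two `tf.repeat` calls. A tiny standalone sketch with hypothetical numbers (a 2x2 patch grid, `patch_size=2`):

import tensorflow as tf

patch_size = 2
bool_masked_pos = tf.constant([[1, 0, 0, 1]])        # (batch, num_patches) for a 2x2 grid
size = 2                                             # image_size // patch_size

mask = tf.reshape(bool_masked_pos, (-1, size, size)) # (batch, 2, 2)
mask = tf.repeat(mask, patch_size, 1)                # stretch rows to pixel height
mask = tf.repeat(mask, patch_size, 2)                # stretch cols to pixel width
print(mask.numpy()[0])
# [[1 1 0 0]
#  [1 1 0 0]
#  [0 0 1 1]
#  [0 0 1 1]]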
class TFSwinForImageClassification(TFSwinPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: SwinConfig):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.swin = TFSwinMainLayer(config, name="swin")

        # Classifier head
        self.classifier = (
            keras.layers.Dense(config.num_labels, name="classifier")
            if config.num_labels > 0
            else keras.layers.Activation("linear", name="classifier")
        )

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSwinImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        labels: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor, ...], TFSwinImageClassifierOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.swin(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        pooled_output = outputs[1]

        logits = self.classifier(pooled_output, training=training)

        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSwinImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "swin", None) is not None:
            with tf.name_scope(self.swin.name):
                self.swin.build(None)
        if getattr(self, "classifier", None) is not None:
            if hasattr(self.classifier, "name"):
                with tf.name_scope(self.classifier.name):
                    self.classifier.build([None, None, self.swin.num_features])
type: class_definition | start: 67,712 | end: 70,789 | depth: 0 | parent_class: null | class_index: 3,617
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/swin/modeling_tf_swin.py
class MobileNetV1FeatureExtractor(MobileNetV1ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileNetV1FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileNetV1ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
type: class_definition | start: 831 | end: 1,221 | depth: 0 | parent_class: null | class_index: 3,618
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py
class MobileNetV1ImageProcessor(BaseImageProcessor):
    r"""
    Constructs a MobileNetV1 image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
            is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
            `preprocess` method.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
            Can be overridden by the `crop_size` parameter in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        default_to_square = True
        if "shortest_edge" in size:
            size = size["shortest_edge"]
            default_to_square = False
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")

        output_size = get_resize_output_image_size(
            image,
            size=size,
            default_to_square=default_to_square,
            input_data_format=input_data_format,
        )
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
                an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        all_images = []
        for image in images:
            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

            if do_center_crop:
                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )

            all_images.append(image)

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            for image in all_images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
type: class_definition | start: 1,360 | end: 15,237 | depth: 0 | parent_class: null | class_index: 3,619
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py
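A hedged usage sketch for the processor, assuming a recent `transformers` install with vision dependencies; any HWC uint8 array (or PIL image) works as input, and the defaults apply resize (shortest edge 256), center crop (224), rescale, and ImageNet normalization:

import numpy as np
from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor()
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # any HWC uint8 image

inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224): resized, center-cropped, normalized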
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[Union[bool, str]] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class_definition
6,894
9,133
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
null
3,620
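`MobileNetV1ConvLayer` above optionally applies TensorFlow-style "SAME" padding via `apply_tf_padding`, which is not included in this record. The sketch below is a standalone illustration of that padding rule; the helper name and code are mine, only the formula (output size is `ceil(input / stride)`, with any odd padding going to the end) is the point:

```python
# Illustrative sketch of what `tf_padding=True` means for the conv layer above:
# compute an asymmetric per-dimension pad so the output is ceil(input / stride).
import math

def tf_same_padding(in_size, kernel_size, stride):
    out_size = math.ceil(in_size / stride)
    pad_total = max((out_size - 1) * stride + kernel_size - in_size, 0)
    pad_begin = pad_total // 2
    return pad_begin, pad_total - pad_begin

print(tf_same_padding(224, 3, 2))  # (0, 1): pads only on the right/bottom
print(tf_same_padding(112, 3, 1))  # (1, 1): symmetric, same as PyTorch padding=1
```

The stride-2 case is why TF checkpoints cannot simply use the symmetric `padding=(kernel_size - 1) // 2` that the non-TF branch of the layer falls back to.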
class MobileNetV1PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileNetV1Config load_tf_weights = load_tf_weights_in_mobilenet_v1 base_model_prefix = "mobilenet_v1" main_input_name = "pixel_values" supports_gradient_checkpointing = False _no_split_modules = [] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.BatchNorm2d): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
9,136
10,030
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
null
3,621
class MobileNetV1Model(MobileNetV1PreTrainedModel): def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True): super().__init__(config) self.config = config depth = 32 out_channels = max(int(depth * config.depth_multiplier), config.min_depth) self.conv_stem = MobileNetV1ConvLayer( config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2, ) strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] self.layer = nn.ModuleList() for i in range(13): in_channels = out_channels if strides[i] == 2 or i == 0: depth *= 2 out_channels = max(int(depth * config.depth_multiplier), config.min_depth) self.layer.append( MobileNetV1ConvLayer( config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels, ) ) self.layer.append( MobileNetV1ConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, ) ) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): raise NotImplementedError @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.conv_stem(pixel_values) all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) last_hidden_state = hidden_states if self.pooler is not None: pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1) else: pooled_output = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, )
class_definition
11,444
14,929
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
null
3,622
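The channel widths built in `MobileNetV1Model.__init__` above follow a simple rule: `depth` starts at 32, doubles on the first block and on every stride-2 block, and each width is `max(int(depth * depth_multiplier), min_depth)`. A standalone sketch (no imports) that reproduces the progression:

```python
# Standalone sketch of the channel progression used by MobileNetV1Model above.
# With depth_multiplier=1.0 and min_depth=8 this reproduces the classic
# 32 -> 64 -> ... -> 1024 widths of MobileNet v1.
def mobilenet_v1_channels(depth_multiplier=1.0, min_depth=8):
    depth = 32
    out_channels = max(int(depth * depth_multiplier), min_depth)  # stem output
    widths = [out_channels]
    strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
    for i, stride in enumerate(strides):
        if stride == 2 or i == 0:
            depth *= 2
        out_channels = max(int(depth * depth_multiplier), min_depth)  # pointwise output
        widths.append(out_channels)
    return widths

print(mobilenet_v1_channels())
# [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024]
print(mobilenet_v1_channels(0.25))  # quarter-width variant, clamped below by min_depth
```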
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel): def __init__(self, config: MobileNetV1Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilenet_v1 = MobileNetV1Model(config) last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels # Classifier head self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True) self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(self.dropout(pooled_output)) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
class_definition
15,142
18,593
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py
null
3,623
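A hedged end-to-end sketch of the classification head above; the checkpoint name is the one referenced in the `MobileNetV1Config` docstring below, and the image URL matches the examples used elsewhere in these records:

```python
# Sketch: classify one image with MobileNetV1ForImageClassification.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits

predicted_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_idx])
```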
class MobileNetV1Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileNetV1Model`]. It is used to instantiate a MobileNetV1 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileNetV1 [google/mobilenet_v1_1.0_224](https://huggingface.co/google/mobilenet_v1_1.0_224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. depth_multiplier (`float`, *optional*, defaults to 1.0): Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32 channels. This is sometimes also called "alpha" or "width multiplier". min_depth (`int`, *optional*, defaults to 8): All layers will have at least this many channels. hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`): The non-linear activation function (function or string) in the Transformer encoder and convolution layers. tf_padding (`bool`, *optional*, defaults to `True`): Whether to use TensorFlow padding rules on the convolution layers. classifier_dropout_prob (`float`, *optional*, defaults to 0.999): The dropout ratio for attached classifiers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 0.001): The epsilon used by the layer normalization layers. Example: ```python >>> from transformers import MobileNetV1Config, MobileNetV1Model >>> # Initializing a "mobilenet_v1_1.0_224" style configuration >>> configuration = MobileNetV1Config() >>> # Initializing a model from the "mobilenet_v1_1.0_224" style configuration >>> model = MobileNetV1Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mobilenet_v1" def __init__( self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs, ): super().__init__(**kwargs) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero.") self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.min_depth = min_depth self.hidden_act = hidden_act self.tf_padding = tf_padding self.classifier_dropout_prob = classifier_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps
class_definition
914
4,263
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py
null
3,624
class MobileNetV1OnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})]) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})]) @property def atol_for_validation(self) -> float: return 1e-4
class_definition
4,266
4,870
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py
null
3,625
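`MobileNetV1OnnxConfig` above only marks the batch axis of `pixel_values` (and of the outputs) as dynamic. As a rough illustration, and explicitly not the library's own export utility, that mapping corresponds to the `dynamic_axes` argument of `torch.onnx.export`; the output file name and opset below are arbitrary choices:

```python
# Rough sketch only: export the base model with the dynamic axes that
# MobileNetV1OnnxConfig declares. This is not the library's export path.
import torch
from transformers import MobileNetV1Model

model = MobileNetV1Model.from_pretrained("google/mobilenet_v1_1.0_224")
model.config.return_dict = False          # export tuple outputs instead of a ModelOutput
model.eval()

dummy = torch.randn(1, 3, 224, 224)
torch.onnx.export(
    model,
    (dummy,),
    "mobilenet_v1.onnx",
    input_names=["pixel_values"],
    output_names=["last_hidden_state", "pooler_output"],
    dynamic_axes={
        "pixel_values": {0: "batch"},
        "last_hidden_state": {0: "batch"},
        "pooler_output": {0: "batch"},
    },
    opset_version=13,
)
```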
class TFConvNextDropPath(keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). References: (1) github.com:rwightman/pytorch-image-models """ def __init__(self, drop_path: float, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path def call(self, x: tf.Tensor, training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) random_tensor = tf.floor(random_tensor) return (x / keep_prob) * random_tensor return x
class_definition
1,492
2,209
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,626
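A numeric sketch of the stochastic-depth rule implemented in `TFConvNextDropPath.call` above: during training each sample's residual branch is kept with probability `1 - drop_path` and rescaled by `1 / keep_prob`, so the expected value is unchanged:

```python
# Numeric sketch of per-sample drop path (stochastic depth).
import tensorflow as tf

tf.random.set_seed(0)
drop_path = 0.2
x = tf.ones((8, 4))  # 8 samples in the batch, residual-branch output of width 4

keep_prob = 1.0 - drop_path
shape = (tf.shape(x)[0],) + (1,) * (len(x.shape) - 1)          # one mask entry per sample
random_tensor = tf.floor(keep_prob + tf.random.uniform(shape, 0, 1))
y = (x / keep_prob) * random_tensor

print(y[:, 0].numpy())  # each entry is either 0.0 or 1.25 (= 1 / keep_prob)
```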
class TFConvNextEmbeddings(keras.layers.Layer): """This class is comparable to (and inspired by) the SwinEmbeddings class found in src/transformers/models/swin/modeling_swin.py. """ def __init__(self, config: ConvNextConfig, **kwargs): super().__init__(**kwargs) self.patch_embeddings = keras.layers.Conv2D( filters=config.hidden_sizes[0], kernel_size=config.patch_size, strides=config.patch_size, name="patch_embeddings", kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), ) self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm") self.num_channels = config.num_channels self.config = config def call(self, pixel_values): if isinstance(pixel_values, dict): pixel_values = pixel_values["pixel_values"] tf.debugging.assert_equal( shape_list(pixel_values)[1], self.num_channels, message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.", ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) embeddings = self.patch_embeddings(pixel_values) embeddings = self.layernorm(embeddings) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "patch_embeddings", None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build([None, None, None, self.config.num_channels]) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
class_definition
2,212
4,328
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,627
class TFConvNextLayer(keras.layers.Layer): """This corresponds to the `Block` class in the original implementation. There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C, H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow NHWC ordering, we can just apply the operations straight-away without the permutation. Args: config ([`ConvNextConfig`]): Model configuration class. dim (`int`): Number of input channels. drop_path (`float`): Stochastic depth rate. Default: 0.0. """ def __init__(self, config, dim, drop_path=0.0, **kwargs): super().__init__(**kwargs) self.dim = dim self.config = config self.dwconv = keras.layers.Conv2D( filters=dim, kernel_size=7, padding="same", groups=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="dwconv", ) # depthwise conv self.layernorm = keras.layers.LayerNormalization( epsilon=1e-6, name="layernorm", ) self.pwconv1 = keras.layers.Dense( units=4 * dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="pwconv1", ) # pointwise/1x1 convs, implemented with linear layers self.act = get_tf_activation(config.hidden_act) self.pwconv2 = keras.layers.Dense( units=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="pwconv2", ) # Using `layers.Activation` instead of `tf.identity` to better control `training` # behaviour. self.drop_path = ( TFConvNextDropPath(drop_path, name="drop_path") if drop_path > 0.0 else keras.layers.Activation("linear", name="drop_path") ) def build(self, input_shape: tf.TensorShape = None): # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa) self.layer_scale_parameter = ( self.add_weight( shape=(self.dim,), initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value), trainable=True, name="layer_scale_parameter", ) if self.config.layer_scale_init_value > 0 else None ) if self.built: return self.built = True if getattr(self, "dwconv", None) is not None: with tf.name_scope(self.dwconv.name): self.dwconv.build([None, None, None, self.dim]) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, None, self.dim]) if getattr(self, "pwconv1", None) is not None: with tf.name_scope(self.pwconv1.name): self.pwconv1.build([None, None, self.dim]) if getattr(self, "pwconv2", None) is not None: with tf.name_scope(self.pwconv2.name): self.pwconv2.build([None, None, 4 * self.dim]) if getattr(self, "drop_path", None) is not None: with tf.name_scope(self.drop_path.name): self.drop_path.build(None) def call(self, hidden_states, training=False): input = hidden_states x = self.dwconv(hidden_states) x = self.layernorm(x) x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) if self.layer_scale_parameter is not None: x = self.layer_scale_parameter * x x = input + self.drop_path(x, training=training) return x
class_definition
4,331
8,391
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,628
class TFConvNextStage(keras.layers.Layer): """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks. Args: config (`ConvNextV2Config`): Model configuration class. in_channels (`int`): Number of input channels. out_channels (`int`): Number of output channels. depth (`int`): Number of residual blocks. drop_path_rates(`List[float]`): Stochastic depth rates for each layer. """ def __init__( self, config: ConvNextConfig, in_channels: int, out_channels: int, kernel_size: int = 2, stride: int = 2, depth: int = 2, drop_path_rates: Optional[List[float]] = None, **kwargs, ): super().__init__(**kwargs) if in_channels != out_channels or stride > 1: self.downsampling_layer = [ keras.layers.LayerNormalization( epsilon=1e-6, name="downsampling_layer.0", ), # Inputs to this layer will follow NHWC format since we # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings` # layer. All the outputs throughout the model will be in NHWC # from this point on until the output where we again change to # NCHW. keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name="downsampling_layer.1", ), ] else: self.downsampling_layer = [tf.identity] drop_path_rates = drop_path_rates or [0.0] * depth self.layers = [ TFConvNextLayer( config, dim=out_channels, drop_path=drop_path_rates[j], name=f"layers.{j}", ) for j in range(depth) ] self.in_channels = in_channels self.out_channels = out_channels self.stride = stride def call(self, hidden_states): for layer in self.downsampling_layer: hidden_states = layer(hidden_states) for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) if self.in_channels != self.out_channels or self.stride > 1: with tf.name_scope(self.downsampling_layer[0].name): self.downsampling_layer[0].build([None, None, None, self.in_channels]) with tf.name_scope(self.downsampling_layer[1].name): self.downsampling_layer[1].build([None, None, None, self.in_channels])
class_definition
8,394
11,591
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,629
class TFConvNextEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.stages = [] drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths)) drop_path_rates = tf.split(drop_path_rates, config.depths) drop_path_rates = [x.numpy().tolist() for x in drop_path_rates] prev_chs = config.hidden_sizes[0] for i in range(config.num_stages): out_chs = config.hidden_sizes[i] stage = TFConvNextStage( config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i], name=f"stages.{i}", ) self.stages.append(stage) prev_chs = out_chs def call(self, hidden_states, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.stages): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states) def build(self, input_shape=None): for stage in self.stages: with tf.name_scope(stage.name): stage.build(None)
class_definition
11,594
13,328
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,630
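The encoder above spreads stochastic-depth rates linearly over all blocks and then splits them per stage. A standalone sketch using the convnext-tiny depths and an illustrative (non-default) `drop_path_rate`:

```python
# Sketch of the per-stage stochastic-depth schedule built in TFConvNextEncoder.
import tensorflow as tf

depths = [3, 3, 9, 3]      # ConvNextConfig defaults (convnext-tiny)
drop_path_rate = 0.1       # illustrative value; the config default is 0.0

rates = tf.linspace(0.0, drop_path_rate, sum(depths))
per_stage = [r.numpy().tolist() for r in tf.split(rates, depths)]
for i, stage_rates in enumerate(per_stage):
    print(f"stage {i}: {[round(v, 3) for v in stage_rates]}")
```

Earlier blocks get rates close to 0 while the deepest blocks approach `drop_path_rate`, which is the usual stochastic-depth schedule.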
class TFConvNextMainLayer(keras.layers.Layer): config_class = ConvNextConfig def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFConvNextEmbeddings(config, name="embeddings") self.encoder = TFConvNextEncoder(config, name="encoder") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") # We are setting the `data_format` like so because from here on we will revert to the # NCHW output format self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None @unpack_inputs def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values, training=training) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) last_hidden_state = encoder_outputs[0] # Change to NCHW output format have uniformity in the modules last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2)) pooled_output = self.layernorm(self.pooler(last_hidden_state)) # Change the other hidden state outputs to NCHW as well if output_hidden_states: hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) if not return_dict: hidden_states = hidden_states if output_hidden_states else () return (last_hidden_state, pooled_output) + hidden_states return TFBaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, self.config.hidden_sizes[-1]])
class_definition
13,351
16,467
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,631
class TFConvNextPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ConvNextConfig base_model_prefix = "convnext" main_input_name = "pixel_values"
class_definition
16,470
16,773
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,632
class TFConvNextModel(TFConvNextPreTrainedModel): def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs): super().__init__(config, *inputs, **kwargs) self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext") @unpack_inputs @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, TFConvNextModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224") >>> inputs = image_processor(images=image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") outputs = self.convnext( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convnext", None) is not None: with tf.name_scope(self.convnext.name): self.convnext.build(None)
class_definition
20,352
22,905
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,633
class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: ConvNextConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convnext = TFConvNextMainLayer(config, name="convnext") # Classifier head self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification >>> import tensorflow as tf >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224") >>> inputs = image_processor(images=image, return_tensors="tf") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0] >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)]) ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") outputs = self.convnext( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convnext", None) is not None: with tf.name_scope(self.convnext.name): self.convnext.build(None) if getattr(self, "classifier", None) is not None: if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_sizes[-1]])
class_definition
23,111
27,192
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_tf_convnext.py
null
3,634
class ConvNextImageProcessor(BaseImageProcessor): r""" Constructs a ConvNeXT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overriden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`): Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can be overriden by `size` in the `preprocess` method. crop_pct (`float` *optional*, defaults to 224 / 256): Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be overriden by `crop_pct` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overriden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overriden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overriden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 384} size = get_size_dict(size, default_to_square=False) self.do_resize = do_resize self.size = size # Default value set here for backwards compatibility where the value in config is None self.crop_pct = crop_pct if crop_pct is not None else 224 / 256 self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. crop_pct (`float`): Percentage of the image to crop. Only has an effect if size < 384. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" not in size: raise ValueError(f"Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}") shortest_edge = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct resize_shortest_edge = int(shortest_edge / crop_pct) resize_size = get_resize_output_image_size( image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format ) image = resize( image=image, size=resize_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # then crop to (shortest_edge, shortest_edge) return center_crop( image=image, size=(shortest_edge, shortest_edge), data_format=data_format, input_data_format=input_data_format, **kwargs, ) else: # warping (no cropping) when evaluated at 384 or larger return resize( image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. crop_pct (`float`, *optional*, defaults to `self.crop_pct`): Percentage of the image to crop if size < 384. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. 
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize crop_pct = crop_pct if crop_pct is not None else self.crop_pct resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize( image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format ) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
class_definition
1,438
15,826
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/image_processing_convnext.py
null
3,635
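A worked example of the `crop_pct` rule in `ConvNextImageProcessor.resize` above, for a 224-resolution checkpoint (`shortest_edge=224`) with the default `crop_pct = 224 / 256`:

```python
# Worked example of the ConvNeXT resize rule for shortest_edge < 384:
# resize the shorter side to shortest_edge / crop_pct, then center-crop back.
shortest_edge = 224
crop_pct = 224 / 256                 # default used above

resize_shortest_edge = int(shortest_edge / crop_pct)
print(resize_shortest_edge)          # 256 -> resize shorter side to 256, then crop 224x224

# At eval resolution >= 384 there is no crop: the image is warped straight
# to (shortest_edge, shortest_edge).
```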
class ConvNextFeatureExtractor(ConvNextImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ConvNextImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
class_definition
821
1,199
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/feature_extraction_convnext.py
null
3,636
class ConvNextConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate an ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ConvNeXT [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. patch_size (`int`, *optional*, defaults to 4): Patch size to use in the patch embedding layer. num_stages (`int`, *optional*, defaults to 4): The number of stages in the model. hidden_sizes (`List[int]`, *optional*, defaults to [96, 192, 384, 768]): Dimensionality (hidden size) at each stage. depths (`List[int]`, *optional*, defaults to [3, 3, 9, 3]): Depth (number of blocks) for each stage. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. layer_scale_init_value (`float`, *optional*, defaults to 1e-6): The initial value for the layer scale. drop_path_rate (`float`, *optional*, defaults to 0.0): The drop rate for stochastic depth. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. 
Example: ```python >>> from transformers import ConvNextConfig, ConvNextModel >>> # Initializing a ConvNext convnext-tiny-224 style configuration >>> configuration = ConvNextConfig() >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration >>> model = ConvNextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "convnext" def __init__( self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, layer_scale_init_value=1e-6, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.num_channels = num_channels self.patch_size = patch_size self.num_stages = num_stages self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes self.depths = [3, 3, 9, 3] if depths is None else depths self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.image_size = image_size self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class_definition
1,036
5,725
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
null
3,637
class ConvNextOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5
class_definition
5,728
6,129
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/configuration_convnext.py
null
3,638
class ConvNextDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob)
class_definition
2,974
3,456
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,639
class ConvNextLayerNorm(nn.Module): r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.data_format = data_format if self.data_format not in ["channels_last", "channels_first"]: raise NotImplementedError(f"Unsupported data format: {self.data_format}") self.normalized_shape = (normalized_shape,) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.data_format == "channels_last": x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) elif self.data_format == "channels_first": input_dtype = x.dtype x = x.float() u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = x.to(dtype=input_dtype) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x
class_definition
3,459
4,935
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,640
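A plain-PyTorch sanity-check sketch (no transformers import) showing that the `channels_first` branch of `ConvNextLayerNorm` above matches the standard `layer_norm` applied after permuting to channels-last:

```python
# Check: channels_first normalization over dim 1 == permute + F.layer_norm + permute back.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(2, 8, 5, 5)                  # (batch, channels, height, width)
weight = torch.randn(8)
bias = torch.randn(8)
eps = 1e-6

# channels_first computation, as in ConvNextLayerNorm
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
y_first = (x - u) / torch.sqrt(s + eps)
y_first = weight[:, None, None] * y_first + bias[:, None, None]

# reference: permute to channels_last and use the built-in layer_norm
y_ref = F.layer_norm(x.permute(0, 2, 3, 1), (8,), weight, bias, eps).permute(0, 3, 1, 2)

print(torch.allclose(y_first, y_ref, atol=1e-5))  # expected: True
```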
class ConvNextEmbeddings(nn.Module): """This class is comparable to (and inspired by) the SwinEmbeddings class found in src/transformers/models/swin/modeling_swin.py. """ def __init__(self, config): super().__init__() self.patch_embeddings = nn.Conv2d( config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size ) self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first") self.num_channels = config.num_channels def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.patch_embeddings(pixel_values) embeddings = self.layernorm(embeddings) return embeddings
class_definition
4,938
5,960
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,641
class ConvNextLayer(nn.Module): """This corresponds to the `Block` class in the original implementation. There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C, H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back The authors used (2) as they find it slightly faster in PyTorch. Args: config ([`ConvNextConfig`]): Model configuration class. dim (`int`): Number of input channels. drop_path (`float`): Stochastic depth rate. Default: 0.0. """ def __init__(self, config, dim, drop_path=0): super().__init__() self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv self.layernorm = ConvNextLayerNorm(dim, eps=1e-6) self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers self.act = ACT2FN[config.hidden_act] self.pwconv2 = nn.Linear(4 * dim, dim) self.layer_scale_parameter = ( nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True) if config.layer_scale_init_value > 0 else None ) self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: input = hidden_states x = self.dwconv(hidden_states) x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) x = self.layernorm(x) x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) if self.layer_scale_parameter is not None: x = self.layer_scale_parameter * x x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = input + self.drop_path(x) return x
class_definition
5,963
7,842
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,642
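A tiny sketch of the layer-scale trick used in `ConvNextLayer` above: the residual-branch output is scaled channel-wise by a learnable vector initialised to `layer_scale_init_value` (1e-6 by default), so each block starts out close to an identity mapping:

```python
# Layer scale at initialisation: the residual branch is almost switched off.
import torch

dim = 4
layer_scale_init_value = 1e-6
layer_scale = torch.nn.Parameter(layer_scale_init_value * torch.ones(dim))

branch_output = torch.randn(2, 7, 7, dim)    # (N, H, W, C), as inside the block
scaled = layer_scale * branch_output         # broadcast over the channel dimension
print(scaled.abs().max().item())             # ~1e-6: near-identity residual update
```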
class ConvNextStage(nn.Module): """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks. Args: config ([`ConvNextConfig`]): Model configuration class. in_channels (`int`): Number of input channels. out_channels (`int`): Number of output channels. depth (`int`): Number of residual blocks. drop_path_rates(`List[float]`): Stochastic depth rates for each layer. """ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None): super().__init__() if in_channels != out_channels or stride > 1: self.downsampling_layer = nn.Sequential( ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"), nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride), ) else: self.downsampling_layer = nn.Identity() drop_path_rates = drop_path_rates or [0.0] * depth self.layers = nn.Sequential( *[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)] ) def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: hidden_states = self.downsampling_layer(hidden_states) hidden_states = self.layers(hidden_states) return hidden_states
class_definition
7,845
9,239
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,643
class ConvNextEncoder(nn.Module): def __init__(self, config): super().__init__() self.stages = nn.ModuleList() drop_path_rates = [ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths) ] prev_chs = config.hidden_sizes[0] for i in range(config.num_stages): out_chs = config.hidden_sizes[i] stage = ConvNextStage( config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i], ) self.stages.append(stage) prev_chs = out_chs def forward( self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, BaseModelOutputWithNoAttention]: all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.stages): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=hidden_states, hidden_states=all_hidden_states, )
class_definition
9,242
10,890
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,644
class ConvNextPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ConvNextConfig base_model_prefix = "convnext" main_input_name = "pixel_values" _no_split_modules = ["ConvNextLayer"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
10,893
11,817
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,645
class ConvNextModel(ConvNextPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = ConvNextEmbeddings(config) self.encoder = ConvNextEncoder(config) # final layernorm layer self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: torch.FloatTensor = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] # global average pooling, (N, C, H, W) -> (N, C) pooled_output = self.layernorm(last_hidden_state.mean([-2, -1])) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
class_definition
13,206
15,290
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,646
class ConvNextForImageClassification(ConvNextPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.convnext = ConvNextModel(config) # Classifier head self.classifier = ( nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: torch.FloatTensor = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
class_definition
15,496
18,753
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,647
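A sketch of the classification head in use, again assuming the `facebook/convnext-tiny-224` checkpoint (fine-tuned on ImageNet-1k) and a dummy input in place of a processed image.

```python
import torch
from transformers import ConvNextForImageClassification

# pooled features -> linear classifier -> label
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

pixel_values = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    logits = model(pixel_values).logits  # (1, 1000) for the ImageNet-1k head

predicted_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_idx])
```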
class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.embeddings = ConvNextEmbeddings(config)
        self.encoder = ConvNextEncoder(config)
        self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes

        # Add layer norms to hidden states of out_features
        hidden_states_norms = {}
        for stage, num_channels in zip(self._out_features, self.channels):
            hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format="channels_first")
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output,
            output_hidden_states=True,
            return_dict=return_dict,
        )

        hidden_states = outputs.hidden_states if return_dict else outputs[1]

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                feature_maps += (hidden_state,)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=hidden_states if output_hidden_states else None,
            attentions=None,
        )
class_definition
18,904
21,822
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/convnext/modeling_convnext.py
null
3,648
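A sketch of selecting which stages the backbone returns via `out_features`; stage names and the kwarg behavior follow the `BackboneMixin` conventions, and the checkpoint is assumed to be `facebook/convnext-tiny-224`.

```python
import torch
from transformers import ConvNextBackbone

# ConvNext stage names are ["stem", "stage1", ..., "stage4"]; out_features picks
# which stages get layernormed and returned as feature maps
model = ConvNextBackbone.from_pretrained(
    "facebook/convnext-tiny-224", out_features=["stage2", "stage4"]
)

with torch.no_grad():
    outputs = model(torch.rand(1, 3, 224, 224))

for name, fmap in zip(model.out_features, outputs.feature_maps):
    print(name, tuple(fmap.shape))  # channels-first maps, one per requested stage
```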
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
    Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import GPTNeoXTokenizerFast

    >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained("openai-community/gpt2")
    >>> tokenizer("Hello world")["input_ids"]
    [15496, 995]

    >>> tokenizer(" Hello world")["input_ids"]
    [18435, 995]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
    the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The end of sequence token.
        pad_token (`str`, *optional*):
            Token for padding a sequence.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (The GPTNeoX tokenizer detects the beginning of words by the preceding space).
        add_bos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add a `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether or not the post-processing step should trim offsets to avoid including whitespaces.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        pad_token=None,
        add_bos_token=False,
        add_eos_token=False,
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self._add_bos_token = add_bos_token
        self._add_eos_token = add_eos_token
        self.update_post_processor()

    @property
    def add_eos_token(self):
        return self._add_eos_token

    @property
    def add_bos_token(self):
        return self._add_bos_token

    @add_eos_token.setter
    def add_eos_token(self, value):
        self._add_eos_token = value
        self.update_post_processor()

    @add_bos_token.setter
    def add_bos_token(self, value):
        self._add_bos_token = value
        self.update_post_processor()

    # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor
    def update_post_processor(self):
        """
        Updates the underlying post processor with the current `bos_token` and `eos_token`.
        """
        bos = self.bos_token
        bos_token_id = self.bos_token_id
        if bos is None and self.add_bos_token:
            raise ValueError("add_bos_token = True but bos_token = None")

        eos = self.eos_token
        eos_token_id = self.eos_token_id
        if eos is None and self.add_eos_token:
            raise ValueError("add_eos_token = True but eos_token = None")

        single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
        pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"

        special_tokens = []
        if self.add_bos_token:
            special_tokens.append((bos, bos_token_id))
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=single, pair=pair, special_tokens=special_tokens
        )

    # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )

    # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
class_definition
1,008
8,947
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py
null
3,649
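A small sketch of the `add_bos_token`/`add_eos_token` toggles above, assuming the `EleutherAI/gpt-neox-20b` tokenizer files are available; each setter rebuilds the `TemplateProcessing` post-processor.

```python
from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")

ids = tokenizer("hello").input_ids           # no special tokens by default
tokenizer.add_eos_token = True               # setter calls update_post_processor()
ids_with_eos = tokenizer("hello").input_ids  # same ids plus eos_token_id appended
assert ids_with_eos == ids + [tokenizer.eos_token_id]
```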
class GPTNeoXConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate a
    GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the GPTNeoX
    [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50432):
            Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GPTNeoXModel`].
        hidden_size (`int`, *optional*, defaults to 6144):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 44):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 24576):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        rotary_pct (`float`, *optional*, defaults to 0.25):
            Percentage of hidden dimensions to allocate to rotary embeddings.
        rotary_emb_base (`int`, *optional*, defaults to 10000):
            Base for computing rotary embeddings frequency.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio probability of the attention score.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio of (1) the word embeddings, (2) the post-attention hidden states, and (3) the post-mlp
            hidden states.
        classifier_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing token classification, used in the model [`GPTNeoXForTokenClassification`]. The
            dropout ratio for the hidden layer.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        use_parallel_residual (`bool`, *optional*, defaults to `True`):
            Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training
            speedup at large scales (e.g. 20B).
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
            and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this
            value accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If
                    unspecified, it defaults to value recommended by the implementation, using the `factor` field to
                    infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp
                    function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp
                    function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.

    Example:

    ```python
    >>> from transformers import GPTNeoXConfig, GPTNeoXModel

    >>> # Initializing a GPTNeoX gpt-neox-20b style configuration
    >>> configuration = GPTNeoXConfig()

    >>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration
    >>> model = GPTNeoXModel(configuration)  # doctest: +SKIP

    >>> # Accessing the model configuration
    >>> configuration = model.config  # doctest: +SKIP
    ```"""

    model_type = "gpt_neox"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        attention_bias=True,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.partial_rotary_factor = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.rope_theta = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        # Validate the correctness of rotary position embeddings parameters
        # BC: if there is a 'type' field, move it to 'rope_type'.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
class_definition
857
10,419
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/configuration_gpt_neox.py
null
3,650
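A sketch of the `rope_scaling` contract described in the docstring above, including the backward-compatible `type` -> `rope_type` remapping done in `__init__`.

```python
from transformers import GPTNeoXConfig

# 'linear' scaling with factor 2.0 roughly doubles the usable context relative
# to the pretraining max_position_embeddings
config = GPTNeoXConfig(
    max_position_embeddings=4096,
    rope_scaling={"rope_type": "linear", "factor": 2.0},
)

# the legacy key 'type' is still accepted and remapped to 'rope_type'
legacy = GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 2.0})
print(legacy.rope_scaling["rope_type"])  # "dynamic"
```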
class GPTNeoXPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTNeoXConfig
    base_model_prefix = "gpt_neox"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GPTNeoXLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_sdpa = True
    _supports_flex_attn = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
class_definition
2,104
3,344
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,651
class GPTNeoXAttention(nn.Module):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them"
            )
        self.head_size = self.hidden_size // self.num_attention_heads
        self.rotary_ndims = int(self.head_size * config.rotary_pct)
        self.rope_theta = config.rotary_emb_base
        self._init_bias(config.max_position_embeddings)

        self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
        self.rotary_emb = GPTNeoXRotaryEmbedding(config=self.config)

        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )
        self.norm_factor = self.head_size**-0.5
        self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.attention_bias)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self.is_causal = True
        self.layer_idx = layer_idx

    def _init_bias(self, max_positions, device=None):
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
            persistent=False,
        )
        if device is not None:
            self.bias = self.bias.to(device)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        position_ids: torch.LongTensor,
        head_mask: Optional[torch.FloatTensor] = None,
        layer_past: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        padding_mask: Optional[torch.Tensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
    ):
        bsz, seq_len, _ = hidden_states.shape

        # Apply attention-specific projections and rope
        query, key, value, present = self._attn_projections_and_rope(
            hidden_states=hidden_states,
            position_ids=position_ids,
            layer_past=layer_past,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )

        # Checking for fallbacks in case an unsupported feature is requested
        attention_type = self.config._attn_implementation
        if (output_attentions or head_mask is not None) and self.config._attn_implementation in [
            "sdpa",
            "flash_attention_2",
        ]:
            logger.warning_once(
                f"Setting `attention_type` to `eager` because `{attention_type}` does not support"
                f" `output_attentions=True` or `head_mask`."
            )
            attention_type = "eager"

        elif (
            self.training
            and self.config.attention_dropout > 0
            and self.config._attn_implementation == "flex_attention"
        ):
            logger.warning_once(
                f"Setting `attention_type` to `eager` because `dropout` is not supported in `{attention_type}`."
            )
            attention_type = "eager"

        # Compute attention
        attn_output, attn_weights = GPTNEOX_ATTENTION_FUNCTION[attention_type](
            query,
            key,
            value,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            norm_factor=self.norm_factor,
            attention_dropout=self.config.attention_dropout,
            training=self.training,
            # Flash Attention 2 specific PEFT check
            target_dtype=self._fa_peft_dtype_check(value),
        )

        # Reshape outputs and final projection
        attn_output = attn_output.contiguous()
        attn_output = attn_output.view(bsz, seq_len, -1)
        attn_output = self.dense(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs

    @classmethod
    def _split_heads(cls, tensor, num_attention_heads, attn_head_size):
        """
        Splits hidden dim into attn_head_size and num_attention_heads
        """
        # tensor: [bs, seq_len, hidden_size]
        new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
        # -> [bs, seq_len, num_attention_heads, attn_head_size]
        tensor = tensor.view(new_shape)
        # -> [bs, num_attention_heads, seq_len, attn_head_size]
        tensor = tensor.permute(0, 2, 1, 3)
        return tensor

    @classmethod
    def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden dim
        """
        # tensor [bs, num_attention_heads, seq_len, attn_head_size]
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        # -> [bs, seq_len, num_attention_heads, attn_head_size]
        tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)
        # -> [bs, seq_len, hidden_size]
        return tensor

    def _attn_projections_and_rope(
        self,
        hidden_states: torch.FloatTensor,
        position_ids: torch.LongTensor,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
    ):
        # Compute QKV
        # Attention heads [batch, seq_len, hidden_size]
        #   --> [batch, seq_len, (np * 3 * head_size)]
        qkv = self.query_key_value(hidden_states)

        # [batch, seq_len, (num_heads * 3 * head_size)]
        #   --> [batch, seq_len, num_heads, 3 * head_size]
        new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)
        qkv = qkv.view(*new_qkv_shape)

        # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]
        query = qkv[..., : self.head_size].permute(0, 2, 1, 3)
        key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)
        value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)

        # Compute rotary embeddings on rotary_ndims
        query_rot = query[..., : self.rotary_ndims]
        query_pass = query[..., self.rotary_ndims :]
        key_rot = key[..., : self.rotary_ndims]
        key_pass = key[..., self.rotary_ndims :]

        cos, sin = position_embeddings
        query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)
        query = torch.cat((query, query_pass), dim=-1)
        key = torch.cat((key, key_pass), dim=-1)

        # Cache QKV values
        if layer_past is not None:
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "partial_rotation_size": self.rotary_ndims,
                "cache_position": cache_position,
            }
            key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs)

        return query, key, value, layer_past

    def _fa_peft_dtype_check(self, value):
        """
        PEFT can silently cast the dtype to float32 - this method returns the target dtype to which FA should convert
        back to (if necessary). For now, we can not move this to the forward pass itself due to the dependency on
        checking on some part of its own weights (last case).
        """
        target_dtype = None
        if self.config._attn_implementation == "flash_attention_2":
            input_dtype = value.dtype
            if input_dtype == torch.float32:
                if torch.is_autocast_enabled():
                    target_dtype = torch.get_autocast_gpu_dtype()
                # Handle the case where the model is quantized
                elif hasattr(self.config, "_pre_quantization_dtype"):
                    target_dtype = self.config._pre_quantization_dtype
                else:
                    target_dtype = self.query_key_value.weight.dtype
        return target_dtype
class_definition
8,612
17,498
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,652
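A quick sketch of the `_split_heads`/`_merge_heads` shape round trip used around the attention core; both are classmethods, so they can be exercised directly on a dummy tensor.

```python
import torch
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXAttention

# [bs, seq, hidden] -> [bs, heads, seq, head_size] -> [bs, seq, hidden]
bs, seq, heads, head_size = 2, 5, 8, 16
hidden = torch.randn(bs, seq, heads * head_size)

split = GPTNeoXAttention._split_heads(hidden, heads, head_size)
assert split.shape == (bs, heads, seq, head_size)

merged = GPTNeoXAttention._merge_heads(split, heads, head_size)
assert torch.equal(merged, hidden)  # the round trip is exact
```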
class GPTNeoXRotaryEmbedding(nn.Module):
    def __init__(self, config: GPTNeoXConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            # This .to() is needed if the model has been moved to a device after being initialized (because
            # the buffer is automatically moved, but not the original copy)
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class_definition
17,597
20,796
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,653
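A standalone sketch of the default-RoPE math that the forward above computes, written with plain torch so the shapes are easy to inspect; `theta` and `dim` stand in for `rope_theta` and `rotary_ndims`.

```python
import torch

# inv_freq_i = theta^(-2i/d); freqs = positions outer-product inv_freq; emb = [freqs, freqs]
theta, dim = 10000.0, 16  # dim plays the role of rotary_ndims (head_size * rotary_pct)
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))  # (dim/2,)

position_ids = torch.arange(6)[None, :]                     # (1, seq_len)
freqs = position_ids[..., None].float() * inv_freq          # (1, seq_len, dim/2) via broadcasting
emb = torch.cat((freqs, freqs), dim=-1)                     # (1, seq_len, dim)
cos, sin = emb.cos(), emb.sin()
print(cos.shape)  # torch.Size([1, 6, 16]), matching the (cos, sin) pair returned above
```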
class GPTNeoXMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
        self.act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        hidden_states = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dense_4h_to_h(hidden_states)
        return hidden_states
class_definition
22,600
23,134
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,654
class GPTNeoXLayer(nn.Module):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.use_parallel_residual = config.use_parallel_residual
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_dropout = nn.Dropout(config.hidden_dropout)
        self.post_mlp_dropout = nn.Dropout(config.hidden_dropout)
        self.attention = GPTNeoXAttention(config, layer_idx)
        self.mlp = GPTNeoXMLP(config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        layer_past: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
    ):
        attention_layer_outputs = self.attention(
            self.input_layernorm(hidden_states),
            attention_mask=attention_mask,
            position_ids=position_ids,
            layer_past=layer_past,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )
        attn_output = attention_layer_outputs[0]  # output_attn: attn_output, present, (attn_weights)
        attn_output = self.post_attention_dropout(attn_output)
        outputs = attention_layer_outputs[1:]

        if self.use_parallel_residual:
            # pseudocode:
            # x = x + attn(ln1(x)) + mlp(ln2(x))
            mlp_output = self.mlp(self.post_attention_layernorm(hidden_states))
            mlp_output = self.post_mlp_dropout(mlp_output)
            hidden_states = mlp_output + attn_output + hidden_states
        else:
            # pseudocode:
            # x = x + attn(ln1(x))
            # x = x + mlp(ln2(x))
            attn_output = attn_output + hidden_states
            mlp_output = self.mlp(self.post_attention_layernorm(attn_output))
            mlp_output = self.post_mlp_dropout(mlp_output)
            hidden_states = mlp_output + attn_output

        if use_cache:
            outputs = (hidden_states,) + outputs  # hidden_states, present, (attn_weights)
        else:
            outputs = (hidden_states,) + outputs[1:]  # hidden_states, (attn_weights)

        return outputs
class_definition
23,137
25,869
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,655
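A toy sketch contrasting the two residual formulations from the pseudocode comments above; the lambdas are stand-ins for the real `attn(ln1(.))` and `mlp(ln2(.))` sub-blocks, so only the wiring is illustrated.

```python
import torch

x = torch.randn(2, 4, 8)
attn = lambda h: 0.1 * h  # stand-in for attention(input_layernorm(x))
mlp = lambda h: 0.2 * h   # stand-in for mlp(post_attention_layernorm(.))

# use_parallel_residual=True: both branches read the same input
parallel = x + attn(x) + mlp(x)

# use_parallel_residual=False: MLP reads the attention-updated stream
h = x + attn(x)
sequential = h + mlp(h)
```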
class GPTNeoXModel(GPTNeoXPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size)
        self.emb_dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([GPTNeoXLayer(config, i) for i in range(config.num_hidden_layers)])
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = GPTNeoXRotaryEmbedding(config=config)

        self._attn_implementation = config._attn_implementation

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_in

    def set_input_embeddings(self, value):
        self.embed_in = value

    @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        r"""
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_in(input_ids)

        # kept for BC (non `Cache` `past_key_values` inputs)
        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        seq_length = inputs_embeds.shape[1]
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device)

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        converted_head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        # Flex Attention converts it to a separate mask
        if head_mask is not None:
            converted_head_mask = ~converted_head_mask.bool() * torch.finfo(inputs_embeds.dtype).min
            converted_head_mask = converted_head_mask.to(dtype=self.dtype, device=self.device)
        head_mask = converted_head_mask

        hidden_states = self.emb_dropout(inputs_embeds)

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        next_decoder_cache = None
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    head_mask[i],
                    use_cache,
                    None,
                    output_attentions,
                    cache_position,
                    position_embeddings,
                )
            else:
                outputs = layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    layer_past=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                )
            hidden_states = outputs[0]
            if use_cache is True:
                next_decoder_cache = outputs[1]
            if output_attentions:
                all_attentions = all_attentions + (outputs[2 if use_cache else 1],)

        hidden_states = self.final_layer_norm(hidden_states)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )

    # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
class_definition
30,613
44,220
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,656
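A minimal forward-pass sketch for the bare model above, assuming a small GPT-NeoX-architecture checkpoint such as `EleutherAI/pythia-70m` (the Pythia suite uses this model class).

```python
import torch
from transformers import AutoTokenizer, GPTNeoXModel

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m")
model = GPTNeoXModel.from_pretrained("EleutherAI/pythia-70m")

inputs = tokenizer("Rotary embeddings rotate query/key pairs.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)
```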
class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["embed_out.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.gpt_neox = GPTNeoXModel(config)
        self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.embed_out

    def set_output_embeddings(self, new_embeddings):
        self.embed_out = new_embeddings

    @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
        >>> config = GPTNeoXConfig.from_pretrained("EleutherAI/gpt-neox-20b")
        >>> config.is_decoder = True
        >>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", config=config)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.gpt_neox(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        lm_logits = self.embed_out(hidden_states)

        lm_loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shift_logits = lm_logits[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithPast(
            loss=lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def _reorder_cache(self, past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
                + layer_past[2:],
            )
        return reordered_past
class_definition
44,358
49,091
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,657
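A generation sketch using the inherited `GenerationMixin`; `past_key_values` is managed internally across decoding steps. The small `EleutherAI/pythia-70m` checkpoint is assumed here purely to keep the example lightweight.

```python
import torch
from transformers import AutoTokenizer, GPTNeoXForCausalLM

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m")
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-70m")

inputs = tokenizer("GPT-NeoX uses", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=20, do_sample=False)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```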
class GPTNeoXForSequenceClassification(GPTNeoXPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.gpt_neox = GPTNeoXModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.gpt_neox(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)

        if not return_dict:
            output = (pooled_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class_definition
49,891
55,115
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,658
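A standalone sketch of the ONNX-friendly "last non-pad token" pooling used in the forward above: `argmax` over `(ids == pad)` finds the first pad position, minus one gives the last real token, and the modulo handles rows with no padding at all.

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 0, 0, 0]])

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]  # no-pad rows wrap to the last index
print(sequence_lengths)  # tensor([2, 1])

logits = torch.randn(2, 5, 3)  # (batch, seq, num_labels)
pooled = logits[torch.arange(2), sequence_lengths]  # one pooled row per sequence
```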
class GPTNeoXForTokenClassification(GPTNeoXPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.gpt_neox = GPTNeoXModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint="LarsJonasson/pythia-410m-deduped-sft-swedish",
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_loss=0.25,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.gpt_neox(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class_definition
55,118
58,237
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,659
class GPTNeoXForQuestionAnswering(GPTNeoXPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.gpt_neox = GPTNeoXModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.gpt_neox(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1).to(start_logits.device)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1).to(end_logits.device)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class_definition
58,547
62,802
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
null
3,660
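The span-extraction head above emits independent start and end logits over the input tokens. A minimal decoding sketch follows; the checkpoint name and question/context strings are placeholders, and in practice a GPT-NeoX checkpoint fine-tuned for extractive QA is needed (a freshly initialized head returns arbitrary spans):

```python
import torch
from transformers import AutoTokenizer, GPTNeoXForQuestionAnswering

checkpoint = "EleutherAI/gpt-neox-20b"  # placeholder; substitute a QA fine-tuned GPT-NeoX model
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = GPTNeoXForQuestionAnswering.from_pretrained(checkpoint)

question, context = "Who wrote the report?", "The report was written by Alice in 2023."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Greedy decoding: pick the most likely start and end positions independently.
start = outputs.start_logits.argmax(-1).item()
end = outputs.end_logits.argmax(-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```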
class PoolFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of [`PoolFormerModel`]. It is used to instantiate a PoolFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PoolFormer [sail/poolformer_s12](https://huggingface.co/sail/poolformer_s12) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of channels in the input image. patch_size (`int`, *optional*, defaults to 16): The size of the input patch. stride (`int`, *optional*, defaults to 16): The stride of the input patch. pool_size (`int`, *optional*, defaults to 3): The size of the pooling window. mlp_ratio (`float`, *optional*, defaults to 4.0): The ratio of the number of channels in the output of the MLP to the number of channels in the input. depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`): The depth of each encoder block. hidden_sizes (`list`, *optional*, defaults to `[64, 128, 320, 512]`): The hidden sizes of each encoder block. patch_sizes (`list`, *optional*, defaults to `[7, 3, 3, 3]`): The size of the input patch for each encoder block. strides (`list`, *optional*, defaults to `[4, 2, 2, 2]`): The stride of the input patch for each encoder block. padding (`list`, *optional*, defaults to `[2, 1, 1, 1]`): The padding of the input patch for each encoder block. num_encoder_blocks (`int`, *optional*, defaults to 4): The number of encoder blocks. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout rate for the dropout layers. hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function for the hidden layers. use_layer_scale (`bool`, *optional*, defaults to `True`): Whether to use layer scale. layer_scale_init_value (`float`, *optional*, defaults to 1e-05): The initial value for the layer scale. initializer_range (`float`, *optional*, defaults to 0.02): The initializer range for the weights. Example: ```python >>> from transformers import PoolFormerConfig, PoolFormerModel >>> # Initializing a PoolFormer sail/poolformer_s12 style configuration >>> configuration = PoolFormerConfig() >>> # Initializing a model (with random weights) from the sail/poolformer_s12 style configuration >>> model = PoolFormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "poolformer" def __init__( self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs, ): self.num_channels = num_channels self.patch_size = patch_size self.stride = stride self.padding = padding self.pool_size = pool_size self.hidden_sizes = hidden_sizes self.mlp_ratio = mlp_ratio self.depths = depths self.patch_sizes = patch_sizes self.strides = strides self.num_encoder_blocks = num_encoder_blocks self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_layer_scale = use_layer_scale self.layer_scale_init_value = layer_scale_init_value self.initializer_range = initializer_range super().__init__(**kwargs)
class_definition
929
5,168
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/configuration_poolformer.py
null
3,661
class PoolFormerOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 2e-3
class_definition
5,171
5,574
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/configuration_poolformer.py
null
3,662
class PoolFormerDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob)
class_definition
2,802
3,286
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,663
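`PoolFormerDropPath` delegates to a `drop_path` helper that is not part of this record. A minimal sketch of the standard stochastic-depth implementation it wraps: one Bernoulli draw per sample zeroes the entire residual branch, and survivors are rescaled by `1 / keep_prob` so the expected output is unchanged:

```python
import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Identity at inference time or when the rate is zero.
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    mask = hidden_states.new_empty(shape).bernoulli_(keep_prob)
    return hidden_states * mask / keep_prob
```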
class PoolFormerEmbeddings(nn.Module): """ Construct Patch Embeddings. """ def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None): super().__init__() patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride) padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding) self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity() def forward(self, pixel_values): embeddings = self.projection(pixel_values) embeddings = self.norm(embeddings) return embeddings
class_definition
3,289
4,162
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,664
class PoolFormerGroupNorm(nn.GroupNorm): """ Group Normalization with 1 group. Input: tensor in shape [B, C, H, W] """ def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs)
class_definition
4,165
4,396
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,665
class PoolFormerPooling(nn.Module): def __init__(self, pool_size): super().__init__() self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, hidden_states): return self.pool(hidden_states) - hidden_states
class_definition
4,399
4,694
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,666
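Subtracting the input in `PoolFormerPooling` means the module returns only the pooling *update*; when the surrounding block adds the residual back, the net effect is plain average pooling and the identity path is not counted twice. A quick check with the same pooling settings as the module (ignoring drop path and layer scale):

```python
import torch
from torch import nn

pool = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
hidden_states = torch.randn(1, 4, 8, 8)

update = pool(hidden_states) - hidden_states       # what PoolFormerPooling returns
mixed = hidden_states + update                     # the block's residual connection

assert torch.allclose(mixed, pool(hidden_states))  # net effect: plain average pooling
```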
class PoolFormerOutput(nn.Module): def __init__(self, config, dropout_prob, hidden_size, intermediate_size): super().__init__() self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1) self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1) self.drop = PoolFormerDropPath(dropout_prob) if isinstance(config.hidden_act, str): self.act_fn = ACT2FN[config.hidden_act] else: self.act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.conv1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.drop(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.drop(hidden_states) return hidden_states
class_definition
4,697
5,496
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,667
class PoolFormerLayer(nn.Module): """This corresponds to the 'PoolFormerBlock' class in the original implementation.""" def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path): super().__init__() self.pooling = PoolFormerPooling(pool_size) self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size) self.before_norm = PoolFormerGroupNorm(num_channels) self.after_norm = PoolFormerGroupNorm(num_channels) # Useful for training neural nets self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.use_layer_scale = config.use_layer_scale if config.use_layer_scale: self.layer_scale_1 = nn.Parameter( config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True ) self.layer_scale_2 = nn.Parameter( config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True ) def forward(self, hidden_states): if self.use_layer_scale: pooling_output = self.pooling(self.before_norm(hidden_states)) scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output # First residual connection hidden_states = hidden_states + self.drop_path(scaled_op) outputs = () layer_output = self.output(self.after_norm(hidden_states)) scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output # Second residual connection output = hidden_states + self.drop_path(scaled_op) outputs = (output,) + outputs return outputs else: pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states))) # First residual connection hidden_states = pooling_output + hidden_states outputs = () # Second residual connection inside the PoolFormerOutput block layer_output = self.drop_path(self.output(self.after_norm(hidden_states))) output = hidden_states + layer_output outputs = (output,) + outputs return outputs
class_definition
5,499
7,765
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,668
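In the layer above, `layer_scale_1` and `layer_scale_2` are `(num_channels,)` vectors; the two `unsqueeze(-1)` calls reshape them to `(num_channels, 1, 1)` so they broadcast one learnable scale per channel over `(batch, num_channels, height, width)` feature maps. A toy illustration:

```python
import torch

features = torch.randn(2, 4, 8, 8)   # (batch, channels, height, width)
scale = torch.full((4,), 1e-5)       # layer_scale_init_value, one scale per channel

scaled = scale.unsqueeze(-1).unsqueeze(-1) * features  # (4,) -> (4, 1, 1), broadcasts over B, H, W
assert scaled.shape == features.shape
```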
class PoolFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] # patch embeddings embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i], stride=config.strides[i], padding=config.padding[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], ) ) self.patch_embeddings = nn.ModuleList(embeddings) # Transformer blocks blocks = [] cur = 0 for i in range(config.num_encoder_blocks): # each block consists of layers layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append( PoolFormerLayer( config, num_channels=config.hidden_sizes[i], pool_size=config.pool_size, hidden_size=config.hidden_sizes[i], intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio), drop_path=dpr[cur + j], ) ) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) def forward(self, pixel_values, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None hidden_states = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings, self.block)): embedding_layer, block_layer = layers # Get patch embeddings from hidden_states hidden_states = embedding_layer(hidden_states) # Send the embeddings through the blocks for _, blk in enumerate(block_layer): layer_outputs = blk(hidden_states) hidden_states = layer_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class_definition
7,768
10,400
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,669
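The encoder's "stochastic depth decay rule" assigns each layer a drop-path rate that grows linearly with its absolute depth, then slices the flat list per stage using the cumulative `cur` offset. A worked example with the default depths and an assumed non-zero rate (the config default is 0.0):

```python
import torch

depths = [2, 2, 6, 2]   # PoolFormerConfig defaults
drop_path_rate = 0.1    # assumed for illustration

dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

cur, per_stage = 0, []
for depth in depths:
    per_stage.append([round(r, 3) for r in dpr[cur : cur + depth]])
    cur += depth
print(per_stage)  # the first layer gets 0.0, the deepest layer gets drop_path_rate
```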
class PoolFormerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = PoolFormerConfig base_model_prefix = "poolformer" main_input_name = "pixel_values" _no_split_modules = ["PoolFormerLayer"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
10,403
11,173
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,670
class PoolFormerModel(PoolFormerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.encoder = PoolFormerEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): # the patch embeddings are owned by the encoder; a `self.embeddings` attribute does not exist on this model return self.encoder.patch_embeddings @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") encoder_outputs = self.encoder( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, )
class_definition
12,268
13,999
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,671
class PoolFormerFinalPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states): output = self.dense(hidden_states) return output
class_definition
14,002
14,275
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,672
class PoolFormerForImageClassification(PoolFormerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.poolformer = PoolFormerModel(config) # Final norm self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1]) # Classifier head self.classifier = ( nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.poolformer( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(self.norm(sequence_output).mean([-2, -1])) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
class_definition
14,426
17,779
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/modeling_poolformer.py
null
3,673
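A short usage sketch for the classification model above; `sail/poolformer_s12` is the checkpoint already referenced in the configuration docstring, while the image path is a placeholder:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

image = Image.open("cat.png")  # placeholder path; any RGB image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])
```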
class PoolFormerImageProcessor(BaseImageProcessor): r""" Constructs a PoolFormer image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. If crop_pct is unset: - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`. - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the aspect ratio. If crop_pct is set: - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)), int(floor(w/crop_pct)))` - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))` whilst maintaining the aspect ratio. - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))` whilst maintaining the aspect ratio. crop_pct (`float`, *optional*, defaults to 0.9): Percentage of the image to crop from the center. Can be overridden by `crop_pct` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying center crop. Only has an effect if `do_center_crop` is set to `True`. Can be overridden by the `crop_size` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = 0.9, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.crop_pct = crop_pct self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def resize( self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. If crop_pct is unset: - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`. - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the aspect ratio. If crop_pct is set: - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)), int(floor(w/crop_pct)))` - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))` whilst maintaining the aspect ratio. - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))` whilst maintaining the aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. crop_pct (`float`, *optional*): Percentage of the image that will be cropped from the center. If set, the image is first resized to `size / crop_pct`, so that the subsequent center crop to `size` keeps roughly `crop_pct` of the resized image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}") if crop_pct is not None: if "shortest_edge" in size: scale_size = int(size["shortest_edge"] / crop_pct) elif "height" in size and "width" in size: if size["height"] == size["width"]: scale_size = int(size["height"] / crop_pct) else: scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct)) else: raise ValueError("Invalid size for resize: {}".format(size)) output_size = get_resize_output_image_size( image, size=scale_size, default_to_square=False, input_data_format=input_data_format ) else: if "shortest_edge" in size: output_size = get_resize_output_image_size( image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format ) elif "height" in size and "width" in size: output_size = (size["height"], size["width"]) else: raise ValueError("Invalid size for resize: {}".format(size)) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. crop_pct (`float`, *optional*, defaults to `self.crop_pct`): Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying center crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values to [0, 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize crop_pct = crop_pct if crop_pct is not None else self.crop_pct resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize( image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format ) for image in images ] if do_center_crop: images = [ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
class_definition
1,421
17,808
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/image_processing_poolformer.py
null
3,674
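The interplay between `size`, `crop_pct`, and the center crop is easiest to see numerically: with the defaults, the shortest edge is first scaled up by `1 / crop_pct`, so that the subsequent 224x224 center crop keeps roughly `crop_pct` of the resized image. A worked example:

```python
size = {"shortest_edge": 224}
crop_pct = 0.9

scale_size = int(size["shortest_edge"] / crop_pct)  # int(248.88...) == 248
print(scale_size)        # 248: shortest edge after the resize step
print(224 / scale_size)  # ~0.903: fraction of the resized edge the crop keeps
```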
class PoolFormerFeatureExtractor(PoolFormerImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PoolFormerImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
class_definition
827
1,213
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/poolformer/feature_extraction_poolformer.py
null
3,675
class MimiConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MimiModel`]. It is used to instantiate a Mimi model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [kyutai/mimi](https://huggingface.co/kyutai/mimi) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz). frame_rate (`float`, *optional*, defaults to 12.5): Framerate of the model. audio_channels (`int`, *optional*, defaults to 1): Number of channels in the audio data. Either 1 for mono or 2 for stereo. hidden_size (`int`, *optional*, defaults to 512): Intermediate representation dimension. num_filters (`int`, *optional*, defaults to 64): Number of convolution kernels of the first `MimiConv1d` down sampling layer. num_residual_layers (`int`, *optional*, defaults to 1): Number of residual layers. upsampling_ratios (`Sequence[int]`, *optional*): Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here that must match the decoder order. If not specified, will default to `[8, 6, 5, 4]` kernel_size (`int`, *optional*, defaults to 7): Kernel size for the initial convolution. last_kernel_size (`int`, *optional*, defaults to 3): Kernel size for the last convolution layer. residual_kernel_size (`int`, *optional*, defaults to 3): Kernel size for the residual layers. dilation_growth_rate (`int`, *optional*, defaults to 2): How much to increase the dilation with each layer. use_causal_conv (`bool`, *optional*, defaults to `True`): Whether to use fully causal convolution. pad_mode (`str`, *optional*, defaults to `"constant"`): Padding mode for the convolutions. compress (`int`, *optional*, defaults to 2): Reduced dimensionality in residual branches. trim_right_ratio (`float`, *optional*, defaults to 1.0): Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If equal to 1.0, it means that all the trimming is done at the right. codebook_size (`int`, *optional*, defaults to 2048): Number of discrete codes in each codebook. codebook_dim (`int`, *optional*, defaults to 256): Dimension of the unquantized codebook vectors. If not defined, uses `hidden_size`. num_quantizers (`int`, *optional*, defaults to 32): Number of quantizer channels, or codebooks, in the quantizer. use_conv_shortcut (`bool`, *optional*, defaults to `False`): Whether to use a convolutional layer as the 'skip' connection in the `MimiResnetBlock` block. If False, an identity function will be used, giving a generic residual connection. vector_quantization_hidden_dimension (`int`, *optional*, defaults to 256): Intermediate representation dimension in the residual vector quantization space. num_semantic_quantizers (`int`, *optional*, defaults to 1): Number of semantic quantizer channels, or codebooks, in the semantic quantizer. Must be lower than `num_quantizers`. upsample_groups (`int`, *optional*, defaults to 512): If `frame_rate!=encodec_frame_rate`, indicates the number of groups used in the upsampling operation to go from one rate to another. num_hidden_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the Transformer models. intermediate_size (`int`, *optional*, defaults to 2048): Dimension of the MLP representations. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details check out [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 8000): The maximum sequence length that this model might ever be used with. Mimi's sliding window attention allows sequences of up to 8000 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the LayerNorm normalization layers. use_cache (`bool`, *optional*, defaults to `False`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. sliding_window (`int`, *optional*, defaults to 250): Sliding window attention window size. If not specified, will default to `250`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. layer_scale_initial_scale (`float`, *optional*, defaults to 0.01): Initial scale of the residual rescaling operation done in the Transformer models. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. Example: ```python >>> from transformers import MimiModel, MimiConfig >>> # Initializing a "kyutai/mimi" style configuration >>> configuration = MimiConfig() >>> # Initializing a model (with random weights) from the "kyutai/mimi" style configuration >>> model = MimiModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mimi" def __init__( self, sampling_rate=24_000, frame_rate=12.5, audio_channels=1, hidden_size=512, num_filters=64, num_residual_layers=1, upsampling_ratios=None, kernel_size=7, last_kernel_size=3, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="constant", compress=2, trim_right_ratio=1.0, codebook_size=2048, codebook_dim=256, num_quantizers=32, use_conv_shortcut=False, vector_quantization_hidden_dimension=256, num_semantic_quantizers=1, upsample_groups=512, num_hidden_layers=8, intermediate_size=2048, num_attention_heads=8, num_key_value_heads=8, head_dim=None, hidden_act="gelu", max_position_embeddings=8000, initializer_range=0.02, norm_eps=1e-5, use_cache=False, rope_theta=10000.0, sliding_window=250, attention_dropout=0.0, layer_scale_initial_scale=0.01, attention_bias=False, **kwargs, ): self.sampling_rate = sampling_rate self.frame_rate = frame_rate self.audio_channels = audio_channels self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios if upsampling_ratios else [8, 6, 5, 4] self.kernel_size = kernel_size self.last_kernel_size = last_kernel_size self.residual_kernel_size = residual_kernel_size self.dilation_growth_rate = dilation_growth_rate self.use_causal_conv = use_causal_conv self.pad_mode = pad_mode self.compress = compress self.trim_right_ratio = trim_right_ratio self.codebook_size = codebook_size self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size self.num_quantizers = num_quantizers self.use_conv_shortcut = use_conv_shortcut self.vector_quantization_hidden_dimension = vector_quantization_hidden_dimension self.upsample_groups = upsample_groups self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.norm_eps = norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.sliding_window = sliding_window self.attention_dropout = attention_dropout self.head_dim = head_dim or hidden_size // num_attention_heads self.layer_scale_initial_scale = layer_scale_initial_scale self.attention_bias = attention_bias if num_semantic_quantizers >= self.num_quantizers: raise ValueError( f"The number of semantic quantizers should be lower than the total number of quantizers {self.num_quantizers}, but is currently {num_semantic_quantizers}." ) self.num_semantic_quantizers = num_semantic_quantizers super().__init__(**kwargs) @property def encodec_frame_rate(self) -> int: hop_length = np.prod(self.upsampling_ratios) return math.ceil(self.sampling_rate / hop_length) @property def num_codebooks(self) -> int: # alias to num_quantizers return self.num_quantizers
class_definition
855
11,893
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/configuration_mimi.py
null
3,676
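The `encodec_frame_rate` property is simple arithmetic over the convolutional strides: the product of the `upsampling_ratios` is the hop length in samples, and dividing the sampling rate by it gives the convolutional latent frame rate. With the defaults:

```python
import math
import numpy as np

sampling_rate = 24_000
upsampling_ratios = [8, 6, 5, 4]   # MimiConfig defaults

hop_length = np.prod(upsampling_ratios)       # 960 samples per latent frame
print(math.ceil(sampling_rate / hop_length))  # 25 (Hz)
# The model's target frame_rate is 12.5 Hz, so a grouped upsampling stage
# (upsample_groups=512) converts between the 25 Hz convolutional rate and it.
```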
class MimiOutput(ModelOutput): """ Args: audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Decoded audio values, obtained using the decoder part of Mimi. encoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer. This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't have their past key value states given to this model). decoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer. This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't have their past key value states given to this model). """ audio_codes: torch.LongTensor = None audio_values: torch.FloatTensor = None encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
class_definition
1,643
3,631
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,677
class MimiEncoderOutput(ModelOutput): """ Args: audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*): Discrete code embeddings computed using `model.encode`. encoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer. This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't have their past key value states given to this model). """ audio_codes: torch.LongTensor = None encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
class_definition
3,645
4,676
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,678
class MimiDecoderOutput(ModelOutput): """ Args: audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*): Decoded audio values, obtained using the decoder part of Mimi. decoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer. This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't have their past key value states given to this model). """ audio_values: torch.FloatTensor = None decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None
class_definition
4,690
5,719
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,679
class MimiConv1d(nn.Module): """Conv1d with asymmetric or causal padding and normalization.""" def __init__( self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1, groups: int = 1, pad_mode=None, bias: bool = True, ): super().__init__() self.causal = config.use_causal_conv self.pad_mode = config.pad_mode if pad_mode is None else pad_mode # warn user on unusual setup between dilation and stride if stride > 1 and dilation > 1: logger.warning( "MimiConv1d has been initialized with stride > 1 and dilation > 1" f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})." ) self.conv = nn.Conv1d( in_channels, out_channels, kernel_size, stride, dilation=dilation, groups=groups, bias=bias ) kernel_size = self.conv.kernel_size[0] stride = torch.tensor(self.conv.stride[0], dtype=torch.int64) dilation = self.conv.dilation[0] # Effective kernel size with dilations. kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64) self.register_buffer("stride", stride, persistent=False) self.register_buffer("kernel_size", kernel_size, persistent=False) self.register_buffer("padding_total", torch.tensor(kernel_size - stride, dtype=torch.int64), persistent=False) # Asymmetric padding required for odd strides self.padding_right = self.padding_total // 2 self.padding_left = self.padding_total - self.padding_right def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv) # Copied from transformers.models.encodec.modeling_encodec.EncodecConv1d._get_extra_padding_for_conv1d def _get_extra_padding_for_conv1d( self, hidden_states: torch.Tensor, ) -> torch.Tensor: """See `pad_for_conv1d`.""" length = hidden_states.shape[-1] n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1 n_frames = torch.ceil(n_frames).to(torch.int64) - 1 ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total return ideal_length - length @staticmethod # Copied from transformers.models.encodec.modeling_encodec.EncodecConv1d._pad1d def _pad1d(hidden_states: torch.Tensor, paddings: Tuple[int, int], mode: str = "zero", value: float = 0.0): """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input. If this is the case, we insert extra 0 padding to the right before the reflection happens. """ length = hidden_states.shape[-1] padding_left, padding_right = paddings if not mode == "reflect": return nn.functional.pad(hidden_states, paddings, mode, value) max_pad = max(padding_left, padding_right) extra_pad = 0 if length <= max_pad: extra_pad = max_pad - length + 1 hidden_states = nn.functional.pad(hidden_states, (0, extra_pad)) padded = nn.functional.pad(hidden_states, paddings, mode, value) end = padded.shape[-1] - extra_pad return padded[..., :end] def forward(self, hidden_states): extra_padding = self._get_extra_padding_for_conv1d(hidden_states) if self.causal: # Left padding for causal hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode) else: hidden_states = self._pad1d( hidden_states, (self.padding_left, self.padding_right + extra_padding), mode=self.pad_mode ) hidden_states = self.conv(hidden_states) return hidden_states
class_definition
5,722
9,855
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,680
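`_get_extra_padding_for_conv1d` computes how many samples must be appended so that the final convolution window is complete and no trailing audio is dropped. A standalone sketch of the same arithmetic, assuming `dilation=1` so the effective kernel size equals `kernel_size` and `padding_total = kernel_size - stride`:

```python
import math

def extra_padding_for_conv1d(length: int, kernel_size: int, stride: int) -> int:
    padding_total = kernel_size - stride
    n_frames = (length - kernel_size + padding_total) / stride + 1
    n_frames = math.ceil(n_frames) - 1
    ideal_length = n_frames * stride + kernel_size - padding_total
    return ideal_length - length

print(extra_padding_for_conv1d(10, kernel_size=7, stride=2))  # 0: already frame-aligned
print(extra_padding_for_conv1d(11, kernel_size=7, stride=2))  # 1: one extra sample needed
```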
class MimiConvTranspose1d(nn.Module): """ConvTranspose1d with asymmetric or causal padding and normalization.""" def __init__( self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias=True, ): super().__init__() self.causal = config.use_causal_conv self.trim_right_ratio = config.trim_right_ratio self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride, groups=groups, bias=bias) if not (self.causal or self.trim_right_ratio == 1.0): raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions") kernel_size = self.conv.kernel_size[0] stride = self.conv.stride[0] padding_total = kernel_size - stride # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be # removed at the very end, when keeping only the right length for the output, # as removing it here would require also passing the length at the matching layer # in the encoder. if self.causal: # Trim the padding on the right according to the specified ratio # if trim_right_ratio = 1.0, trim everything from right self.padding_right = math.ceil(padding_total * self.trim_right_ratio) else: # Asymmetric padding required for odd strides self.padding_right = padding_total // 2 self.padding_left = padding_total - self.padding_right def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) # unpad end = hidden_states.shape[-1] - self.padding_right hidden_states = hidden_states[..., self.padding_left : end] return hidden_states
class_definition
9,858
12,019
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,681
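The transposed convolution always produces `kernel_size - stride` samples of fixed padding; the class only decides where to trim them. A numeric sketch of that split (values are illustrative, not Mimi defaults):

```python
import math

kernel_size, stride = 8, 4
padding_total = kernel_size - stride  # 4 samples to remove

# Causal, trim_right_ratio = 1.0: everything is trimmed from the right.
padding_right = math.ceil(padding_total * 1.0)      # 4
padding_left = padding_total - padding_right        # 0

# Non-causal: the padding is split evenly (right side rounds down).
padding_right_nc = padding_total // 2               # 2
padding_left_nc = padding_total - padding_right_nc  # 2
```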
class MimiResnetBlock(nn.Module): """ Residual block from SEANet model as used by Mimi. """ def __init__(self, config: MimiConfig, dim: int, dilations: List[int]): super().__init__() kernel_sizes = (config.residual_kernel_size, 1) if len(kernel_sizes) != len(dilations): raise ValueError("Number of kernel sizes should match number of dilations") hidden = dim // config.compress block = [] for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): in_chs = dim if i == 0 else hidden out_chs = dim if i == len(kernel_sizes) - 1 else hidden block += [nn.ELU()] block += [MimiConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)] self.block = nn.ModuleList(block) if config.use_conv_shortcut: self.shortcut = MimiConv1d(config, dim, dim, kernel_size=1) else: self.shortcut = nn.Identity() def forward(self, hidden_states): residual = hidden_states for layer in self.block: hidden_states = layer(hidden_states) return self.shortcut(residual) + hidden_states
class_definition
12,133
13,331
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,682
class MimiEncoder(nn.Module): """SEANet encoder as used by Mimi.""" def __init__(self, config: MimiConfig): super().__init__() model = [MimiConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)] scaling = 1 # Downsample to raw audio scale for ratio in reversed(config.upsampling_ratios): current_scale = scaling * config.num_filters # Add residual layers for j in range(config.num_residual_layers): model += [MimiResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])] # Add downsampling layers model += [nn.ELU()] model += [MimiConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)] scaling *= 2 model += [nn.ELU()] model += [MimiConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)] self.layers = nn.ModuleList(model) # Copied from transformers.models.encodec.modeling_encodec.EncodecEncoder.forward def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states
class_definition
13,334
14,576
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,683
class MimiLayerScale(nn.Module): """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). This rescales diagonally the residual outputs close to 0, with a learnt scale. """ def __init__(self, config): super().__init__() channels = config.hidden_size initial_scale = config.layer_scale_initial_scale self.scale = nn.Parameter(torch.full((channels,), initial_scale, requires_grad=True)) def forward(self, x: torch.Tensor): return self.scale * x
class_definition
14,579
15,107
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,684
class MimiRotaryEmbedding(nn.Module): def __init__(self, config: MimiConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) """ seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: # growth inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len) self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset # This .to() is needed if the model has been moved to a device after being initialized (because # the buffer is automatically moved, but not the original copy) self.original_inv_freq = self.original_inv_freq.to(device) self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class_definition
15,211
18,404
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,685
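`MimiRotaryEmbedding` only produces the `cos`/`sin` tables; applying them to queries and keys happens in an `apply_rotary_pos_emb` helper that sits outside this record. A sketch of the standard rotate-half formulation used throughout the library:

```python
import torch

def rotate_half(x: torch.Tensor) -> torch.Tensor:
    # (x1, x2) -> (-x2, x1) on the last dimension.
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    # cos/sin are (batch, seq_len, head_dim); unsqueeze to broadcast over heads.
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
```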
class MimiMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False) # Copied from transformers.models.clip.modeling_clip.CLIPMLP.forward def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states
class_definition
20,275
20,942
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,686
class MimiAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: MimiConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.scaling = 1 / math.sqrt(config.head_dim) if self.hidden_size % self.num_heads != 0: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias) self.rotary_emb = MimiRotaryEmbedding(config) self.sliding_window = config.sliding_window # Ignore copy def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value
class_definition
21,751
26,537
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,687
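For orientation, here is a minimal standalone sketch (not part of the file above) of the `repeat_kv` expansion that `MimiAttention` relies on for grouped-query attention: key/value heads are tiled so the eager matmuls see one KV head per query head. Tensor sizes are illustrative.

```python
import torch


def repeat_kv_sketch(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """(batch, num_kv_heads, seq_len, head_dim) -> (batch, num_kv_heads * n_rep, seq_len, head_dim)."""
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Insert a repetition axis, broadcast, then fold it into the head axis.
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return hidden_states.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)


kv = torch.randn(2, 4, 10, 64)                # 4 key/value heads
print(repeat_kv_sketch(kv, 8 // 4).shape)     # 8 query heads -> torch.Size([2, 8, 10, 64])
```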
class MimiFlashAttention2(MimiAttention):
    """
    Mimi flash attention module. This module inherits from `MimiAttention` as the weights of the module stay
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment,
        # which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference:
        # https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a
        # wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if isinstance(past_key_value, StaticCache):
            raise ValueError(
                "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
                "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
            )

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transposes are quite inefficient but Flash Attention requires the layout
        # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast in float32. Hence, we need to
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (MimiRMSNorm handles it correctly)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to be silently cast in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            position_ids=position_ids,
            dropout=dropout_rate,
            sliding_window=getattr(self, "sliding_window", None),
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
class_definition
26,671
32,257
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,688
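A hedged usage sketch: the backend above is selected with the standard `attn_implementation` argument of `from_pretrained`. It assumes the `flash-attn` package is installed, a CUDA device is available, and a half-precision dtype is used, which flash attention requires.

```python
import torch
from transformers import MimiModel

# Assumes flash-attn >= 2.x is installed and a CUDA GPU is present.
model = MimiModel.from_pretrained(
    "kyutai/mimi",
    attn_implementation="flash_attention_2",
    torch_dtype=torch.bfloat16,
).to("cuda")
```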
class MimiSdpaAttention(MimiAttention):
    """
    Mimi attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `MimiAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
    the SDPA API.
    """

    # Adapted from MimiAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "MimiModel is using MimiSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not "
                "support `output_attentions=True`. Falling back to the manual attention implementation, but specifying "
                "the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can "
                'be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        causal_mask = attention_mask
        if attention_mask is not None:
            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with
        # custom attn_mask. Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and causal_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an
        # inline conditional assignment in SDPA to support both torch.compile's dynamic shapes and full graph options.
        # An inline conditional prevents dynamic shapes from compiling.
        is_causal = True if causal_mask is None and q_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=causal_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, -1)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value
class_definition
32,389
36,809
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,689
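A small self-contained check of the `is_causal` dispatch above: the boolean flag and an explicit additive upper-triangular mask (like the sliced `causal_mask`) produce the same SDPA output when dropout is disabled.

```python
import torch
import torch.nn.functional as F

q, k, v = (torch.randn(1, 8, 5, 64) for _ in range(3))

out_flag = F.scaled_dot_product_attention(q, k, v, is_causal=True)

# Additive mask with -inf strictly above the diagonal.
mask = torch.zeros(5, 5).masked_fill(torch.triu(torch.ones(5, 5, dtype=torch.bool), diagonal=1), float("-inf"))
out_mask = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)

print(torch.allclose(out_flag, out_mask, atol=1e-6))  # True
```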
class MimiTransformerLayer(nn.Module):
    def __init__(self, config: MimiConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = MIMI_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)

        self.mlp = MimiMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)

        self.self_attn_layer_scale = MimiLayerScale(config)
        self.mlp_layer_scale = MimiLayerScale(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + self.self_attn_layer_scale(hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.mlp_layer_scale(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
class_definition
36,948
40,399
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,690
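The layer above follows a pre-norm residual pattern with per-channel layer scaling. A minimal sketch of that pattern, using a toy `ToyLayerScale` as a stand-in for the real `MimiLayerScale` defined earlier in the file (init value assumed):

```python
import torch
from torch import nn


class ToyLayerScale(nn.Module):
    """Learned per-channel gain applied to a sublayer's output."""

    def __init__(self, dim: int, init: float = 0.01):
        super().__init__()
        self.scale = nn.Parameter(init * torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.scale * x


hidden = torch.randn(2, 10, 512)
norm = nn.LayerNorm(512)
block = nn.Linear(512, 512)  # stand-in for the attention block or the MLP
scale = ToyLayerScale(512)

# residual -> norm -> sublayer -> scale -> add, exactly the order used above
out = hidden + scale(block(norm(hidden)))
print(out.shape)  # torch.Size([2, 10, 512])
```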
class MimiTransformerModel(nn.Module):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MimiTransformerLayer`].

    Args:
        config: MimiConfig
    """

    def __init__(self, config: MimiConfig):
        super().__init__()

        self.layers = nn.ModuleList(
            [MimiTransformerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._attn_implementation = config._attn_implementation
        self.gradient_checkpointing = False
        self.config = config

    def forward(
        self,
        hidden_states: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Embedded representation that will be contextualized by the model
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
                `past_key_values`).

                If you want to change padding behavior, you should read
                [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in
                [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`.

                [What are position IDs?](../glossary#position-ids)
            past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
                Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
                blocks) that can be used to speed up sequential decoding. This typically consists of the
                `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or
                `config.use_cache=True`.

                Two formats are allowed:
                - a [`~cache_utils.Cache`] instance;
                - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                  shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the
                  legacy cache format.

                The model will output the same cache format that is fed as input. If no `past_key_values` are passed,
                the legacy cache format will be returned.

                If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
                don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
                `input_ids` of shape `(batch_size, sequence_length)`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if use_cache and not isinstance(past_key_values, Cache):
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + hidden_states.shape[1], device=hidden_states.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = None
        if attention_mask is not None:
            causal_mask = self._update_causal_mask(
                attention_mask, hidden_states, cache_position, past_key_values, output_attentions
            )

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    # Copied from transformers.models.phi3.modeling_phi3.Phi3Model._update_causal_mask with Phi3->Mimi
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and past_key_values is not None:
                is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
                if is_padding_right:
                    raise ValueError(
                        "You are attempting to perform batched generation with padding_side='right',"
                        " this may lead to unexpected behaviour for the Flash Attention version of Mimi. Make sure to"
                        " call `tokenizer.padding_side = 'left'` before tokenizing the input."
                    )
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)

        # When output attentions is True, the sdpa implementation's forward method calls the eager implementation's forward
        if (
            self.config._attn_implementation == "sdpa"
            and not (using_static_cache or using_sliding_window_cache)
            and not output_attentions
        ):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                sliding_window=self.config.sliding_window,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        # SlidingWindowCache or StaticCache
        if using_sliding_window_cache or using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        # DynamicCache or no cache
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
            config=self.config,
            past_key_values=past_key_values,
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    # Copied from transformers.models.mistral.modeling_mistral.MistralModel._prepare_4d_causal_attention_mask_with_cache_position with Mistral->Mimi
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        config: MimiConfig,
        past_key_values: Cache,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
            config (`MimiConfig`):
                The model's configuration class
            past_key_values (`Cache`):
                The cache class that is being used currently to generate
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            if config.sliding_window is not None:
                # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
                # the check is needed to verify whether the current checkpoint was trained with sliding window or not
                if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
                    sliding_attend_mask = torch.arange(target_length, device=device) <= (
                        cache_position.reshape(-1, 1) - config.sliding_window
                    )
                    diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
            causal_mask *= diagonal_attend_mask
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                if attention_mask.shape[-1] > target_length:
                    attention_mask = attention_mask[:, :target_length]
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
class_definition
40,402
57,139
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,691
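A minimal sketch of the incremental-decoding bookkeeping performed above: `cache_position` is derived from how many tokens the cache has already seen, and `position_ids` is its batched view.

```python
import torch
from transformers import DynamicCache

past_key_values = DynamicCache()
hidden_states = torch.randn(1, 4, 512)  # 4 new frames entering the model

past_seen_tokens = past_key_values.get_seq_length()  # 0 on the first call
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + hidden_states.shape[1])
position_ids = cache_position.unsqueeze(0)
print(cache_position)  # tensor([0, 1, 2, 3])
```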
class MimiDecoder(nn.Module):
    """SEANet decoder as used by Mimi."""

    def __init__(self, config: MimiConfig):
        super().__init__()
        scaling = int(2 ** len(config.upsampling_ratios))
        model = [MimiConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)]

        # Upsample to raw audio scale
        for ratio in config.upsampling_ratios:
            current_scale = scaling * config.num_filters
            # Add upsampling layers
            model += [nn.ELU()]
            model += [
                MimiConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)
            ]
            # Add residual layers
            for j in range(config.num_residual_layers):
                model += [MimiResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))]
            scaling //= 2

        # Add final layers
        model += [nn.ELU()]
        model += [MimiConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)]
        self.layers = nn.ModuleList(model)

    # Copied from transformers.models.encodec.modeling_encodec.EncodecDecoder.forward
    def forward(self, hidden_states):
        for layer in self.layers:
            hidden_states = layer(hidden_states)
        return hidden_states
class_definition
57,142
58,480
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,692
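A back-of-envelope check of the decoder's upsampling: the product of `config.upsampling_ratios` gives the number of audio samples produced per latent frame. The ratio values below are assumed from a typical Mimi-style configuration, not read from a checkpoint; consult `MimiConfig` for the real ones.

```python
upsampling_ratios = [8, 6, 5, 4]  # assumed example values

samples_per_frame = 1
for ratio in upsampling_ratios:
    samples_per_frame *= ratio  # each transposed conv multiplies the length by its stride

print(samples_per_frame)  # 960 -> e.g. 24000 Hz audio / 960 = 25 latent frames per second
```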
class MimiEuclideanCodebook(nn.Module):
    """Codebook with Euclidean distance."""

    def __init__(self, config: MimiConfig, epsilon: float = 1e-5):
        super().__init__()
        embed = torch.zeros(config.codebook_size, config.codebook_dim)

        self.codebook_size = config.codebook_size

        self.register_buffer("initialized", torch.Tensor([True]))
        self.register_buffer("cluster_usage", torch.ones(config.codebook_size))
        self.register_buffer("embed_sum", embed)
        self._embed = None
        self.epsilon = epsilon

    @property
    def embed(self) -> torch.Tensor:
        if self._embed is None:
            self._embed = self.embed_sum / self.cluster_usage.clamp(min=self.epsilon)[:, None]
        return self._embed

    def quantize(self, hidden_states):
        # Projects each vector in `hidden_states` onto the nearest centroid and returns its index.
        # `hidden_states` should be `[N, D]` with `N` the number of input vectors and `D` the dimension.
        dists = torch.cdist(hidden_states[None], self.embed[None], p=2)[0]
        embed_ind = dists.argmin(dim=-1)
        return embed_ind

    # Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.encode
    def encode(self, hidden_states):
        shape = hidden_states.shape
        # pre-process
        hidden_states = hidden_states.reshape((-1, shape[-1]))
        # quantize
        embed_ind = self.quantize(hidden_states)
        # post-process
        embed_ind = embed_ind.view(*shape[:-1])
        return embed_ind

    # Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.decode
    def decode(self, embed_ind):
        quantize = nn.functional.embedding(embed_ind, self.embed)
        return quantize
class_definition
58,483
60,264
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,693
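The nearest-centroid lookup in `quantize` above, reduced to its core: a Euclidean argmin over a codebook via `torch.cdist`, with decoding as a plain table lookup. Sizes are illustrative.

```python
import torch

codebook = torch.randn(16, 8)  # 16 centroids of dimension 8
vectors = torch.randn(5, 8)    # 5 input vectors to quantize

dists = torch.cdist(vectors[None], codebook[None], p=2)[0]  # (5, 16) pairwise distances
indices = dists.argmin(dim=-1)                              # one code index per vector
decoded = codebook[indices]                                 # de-quantized vectors

print(indices.shape, decoded.shape)  # torch.Size([5]) torch.Size([5, 8])
```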
class MimiVectorQuantization(nn.Module):
    """
    Vector quantization implementation. Currently supports only euclidean distance.
    """

    def __init__(self, config: MimiConfig):
        super().__init__()
        self.codebook = MimiEuclideanCodebook(config)

    def encode(self, hidden_states):
        hidden_states = hidden_states.permute(0, 2, 1)
        embed_in = self.codebook.encode(hidden_states)
        return embed_in

    def decode(self, embed_ind):
        quantize = self.codebook.decode(embed_ind)
        quantize = quantize.permute(0, 2, 1)
        return quantize
class_definition
60,371
60,963
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,694
class MimiResidualVectorQuantizer(nn.Module):
    """Residual Vector Quantizer."""

    def __init__(self, config: MimiConfig, num_quantizers: int = None):
        super().__init__()
        self.codebook_size = config.codebook_size
        self.frame_rate = config.frame_rate
        self.num_quantizers = num_quantizers if num_quantizers is not None else config.num_quantizers
        self.layers = nn.ModuleList([MimiVectorQuantization(config) for _ in range(self.num_quantizers)])

        self.input_proj = None
        self.output_proj = None
        if config.vector_quantization_hidden_dimension != config.hidden_size:
            self.input_proj = torch.nn.Conv1d(
                config.hidden_size, config.vector_quantization_hidden_dimension, 1, bias=False
            )
            self.output_proj = torch.nn.Conv1d(
                config.vector_quantization_hidden_dimension, config.hidden_size, 1, bias=False
            )

    def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[int] = None) -> torch.Tensor:
        """
        Encode a given input tensor with the specified frame rate at the given number of quantizers / codebooks. The
        RVQ encode method sets the appropriate number of quantizers to use and returns indices for each quantizer.
        """
        if self.input_proj is not None:
            embeddings = self.input_proj(embeddings)

        num_quantizers = num_quantizers if num_quantizers is not None else self.num_quantizers

        residual = embeddings
        all_indices = []
        for layer in self.layers[:num_quantizers]:
            indices = layer.encode(residual)
            quantized = layer.decode(indices)
            residual = residual - quantized
            all_indices.append(indices)
        out_indices = torch.stack(all_indices)
        return out_indices

    def decode(self, codes: torch.Tensor) -> torch.Tensor:
        """Decode the given codes of shape [B, K, T] to the quantized representation."""
        quantized_out = torch.tensor(0.0, device=codes.device)
        codes = codes.transpose(0, 1)
        for i, indices in enumerate(codes):
            layer = self.layers[i]
            quantized = layer.decode(indices)
            quantized_out = quantized_out + quantized

        if self.output_proj is not None:
            quantized_out = self.output_proj(quantized_out)
        return quantized_out
class_definition
60,966
63,369
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,695
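Residual vector quantization in miniature, mirroring the encode/decode loops above: each stage quantizes what the previous stages failed to explain, and decoding sums the per-stage reconstructions. Codebook and batch sizes are illustrative.

```python
import torch

torch.manual_seed(0)
codebooks = [torch.randn(32, 8) for _ in range(4)]  # one codebook per quantizer stage
x = torch.randn(6, 8)

residual = x.clone()
reconstruction = torch.zeros_like(x)
codes = []
for codebook in codebooks:
    idx = torch.cdist(residual[None], codebook[None])[0].argmin(dim=-1)
    quantized = codebook[idx]
    residual = residual - quantized              # next stage sees what is left to explain
    reconstruction = reconstruction + quantized  # decode = sum of the stage outputs
    codes.append(idx)

print(torch.stack(codes).shape)  # torch.Size([4, 6]) -> (num_quantizers, batch)
```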
class MimiSplitResidualVectorQuantizer(nn.Module):
    """Split Residual Vector Quantizer."""

    def __init__(self, config: MimiConfig):
        super().__init__()
        self.codebook_size = config.codebook_size
        self.frame_rate = config.frame_rate
        self.max_num_quantizers = config.num_quantizers

        self.num_semantic_quantizers = config.num_semantic_quantizers
        self.num_acoustic_quantizers = config.num_quantizers - config.num_semantic_quantizers

        self.semantic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_semantic_quantizers)
        self.acoustic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_acoustic_quantizers)

    def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[float] = None) -> torch.Tensor:
        """
        Encode a given input tensor with the specified frame rate at the given number of quantizers / codebooks. The
        RVQ encode method sets the appropriate number of quantizers to use and returns indices for each quantizer.
        """
        num_quantizers = self.max_num_quantizers if num_quantizers is None else num_quantizers

        if num_quantizers > self.max_num_quantizers:
            raise ValueError(
                f"The number of quantizers (i.e. codebooks) asked for should be lower than the total number of"
                f" quantizers {self.max_num_quantizers}, but is currently {num_quantizers}."
            )

        if num_quantizers < self.num_semantic_quantizers:
            raise ValueError(
                f"The number of quantizers (i.e. codebooks) asked for should be higher than the number of semantic"
                f" quantizers {self.num_semantic_quantizers}, but is currently {num_quantizers}."
            )

        # codes is [K, B, T], with T frames, K nb of codebooks.
        codes = self.semantic_residual_vector_quantizer.encode(embeddings)

        if num_quantizers > self.num_semantic_quantizers:
            acoustic_codes = self.acoustic_residual_vector_quantizer.encode(
                embeddings, num_quantizers=num_quantizers - self.num_semantic_quantizers
            )
            codes = torch.cat([codes, acoustic_codes], dim=0)

        return codes

    def decode(self, codes: torch.Tensor) -> torch.Tensor:
        """Decode the given codes to the quantized representation."""

        # The first num_semantic_quantizers codebooks are decoded using the semantic RVQ
        quantized_out = self.semantic_residual_vector_quantizer.decode(codes[:, : self.num_semantic_quantizers])

        # The rest of the codebooks are decoded using the acoustic RVQ
        if codes.shape[1] > self.num_semantic_quantizers:
            quantized_out += self.acoustic_residual_vector_quantizer.decode(codes[:, self.num_semantic_quantizers :])
        return quantized_out
class_definition
63,372
66,185
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,696
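A tiny illustration (dimensions assumed, not taken from a checkpoint) of the code split performed in `decode` above: codes have shape `(batch, num_quantizers, frames)`; the first `num_semantic_quantizers` codebooks go to the semantic RVQ and the rest to the acoustic RVQ.

```python
import torch

num_semantic_quantizers = 1  # assumed example value
codes = torch.randint(0, 2048, (2, 8, 50))           # (batch, num_quantizers, frames)

semantic_codes = codes[:, :num_semantic_quantizers]  # decoded by the semantic RVQ
acoustic_codes = codes[:, num_semantic_quantizers:]  # decoded by the acoustic RVQ
print(semantic_codes.shape, acoustic_codes.shape)    # (2, 1, 50) (2, 7, 50)
```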
class MimiPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MimiConfig
    base_model_prefix = "mimi"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MimiDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_static_cache = True

    # Copied from transformers.models.encodec.modeling_encodec.EncodecPreTrainedModel._init_weights
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LSTM):
            for name, param in module.named_parameters():
                if "weight" in name:
                    nn.init.xavier_uniform_(param)
                elif "bias" in name:
                    nn.init.constant_(param, 0.0)
class_definition
66,188
68,066
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,697
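The Conv1d bias bound used in `_init_weights` above, written out for a concrete layer: it is the standard fan-in bound `k = sqrt(groups / (in_channels * kernel_size))`, i.e. the same bound PyTorch uses for its default Conv1d bias init.

```python
import math
from torch import nn

conv = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=7, groups=1)
k = math.sqrt(conv.groups / (conv.in_channels * conv.kernel_size[0]))
print(f"bias ~ U(-{k:.4f}, {k:.4f})")
```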
class MimiModel(MimiPreTrainedModel):
    def __init__(self, config: MimiConfig):
        super().__init__(config)
        self.config = config

        self.encoder = MimiEncoder(config)
        self.encoder_transformer = MimiTransformerModel(config)

        self.downsample = None
        self.upsample = None
        if config.frame_rate != config.encodec_frame_rate:
            self.downsample = MimiConv1d(
                config,
                config.hidden_size,
                config.hidden_size,
                kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate),
                stride=2,
                bias=False,
                pad_mode="replicate",
            )

            self.upsample = MimiConvTranspose1d(
                config,
                config.hidden_size,
                config.hidden_size,
                kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate),
                stride=2,
                bias=False,
                groups=config.upsample_groups,
            )

        self.decoder_transformer = MimiTransformerModel(config)
        self.decoder = MimiDecoder(config)

        self.quantizer = MimiSplitResidualVectorQuantizer(config)

        self.bits_per_codebook = int(math.log2(self.config.codebook_size))
        if 2**self.bits_per_codebook != self.config.codebook_size:
            raise ValueError("The codebook_size must be a power of 2.")

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def _encode_frame(
        self,
        input_values: torch.Tensor,
        num_quantizers: int,
        padding_mask: int,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        return_dict: Optional[bool] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Encodes the given input using the underlying VQVAE. The padding mask is required to compute the correct scale.
        """
        embeddings = self.encoder(input_values)
        encoder_outputs = self.encoder_transformer(
            embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict
        )
        if return_dict:
            past_key_values = encoder_outputs.get("past_key_values")
        elif len(encoder_outputs) > 1:
            past_key_values = encoder_outputs[1]
        embeddings = encoder_outputs[0].transpose(1, 2)
        embeddings = self.downsample(embeddings)

        codes = self.quantizer.encode(embeddings, num_quantizers)
        codes = codes.transpose(0, 1)
        return codes, past_key_values

    def encode(
        self,
        input_values: torch.Tensor,
        padding_mask: torch.Tensor = None,
        num_quantizers: Optional[float] = None,
        encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], MimiEncoderOutput]:
        """
        Encodes the input audio waveform into discrete codes.

        Args:
            input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
                Float values of the input audio waveform.
            padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
                Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
                or 0 for *masked*.
            num_quantizers (`int`, *optional*):
                Number of quantizers (i.e. codebooks) to use. By default, all quantizers are used.
            encoder_past_key_values (`Cache`, *optional*):
                Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up
                sequential decoding of the encoder transformer. This typically consists of the `past_key_values`
                returned by the model at a previous stage of decoding, when `use_cache=True` or
                `config.use_cache=True`.

                The model will output the same cache format that is fed as input.

                If `past_key_values` are used, the user can optionally input only the last `audio_values` or
                `audio_codes` (those that don't have their past key value states given to this model).
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.

        Returns:
            `codebook` of shape `[batch_size, num_codebooks, frames]`, the discrete encoded codes for the input audio
            waveform.
        """
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        num_quantizers = self.config.num_quantizers if num_quantizers is None else num_quantizers

        if num_quantizers > self.config.num_quantizers:
            raise ValueError(
                f"The number of quantizers (i.e. codebooks) asked for should be lower than the total number of"
                f" quantizers {self.config.num_quantizers}, but is currently {num_quantizers}."
            )

        _, channels, input_length = input_values.shape

        if channels < 1 or channels > 2:
            raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}")

        if padding_mask is None:
            padding_mask = torch.ones_like(input_values).bool()

        encoded_frames, encoder_past_key_values = self._encode_frame(
            input_values,
            num_quantizers,
            padding_mask.bool(),
            past_key_values=encoder_past_key_values,
            return_dict=return_dict,
        )

        if not return_dict:
            return (
                encoded_frames,
                encoder_past_key_values,
            )

        return MimiEncoderOutput(encoded_frames, encoder_past_key_values)

    def _decode_frame(
        self,
        codes: torch.Tensor,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.Tensor:
        embeddings = self.quantizer.decode(codes)

        embeddings = self.upsample(embeddings)
        decoder_outputs = self.decoder_transformer(
            embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict
        )
        if return_dict:
            past_key_values = decoder_outputs.get("past_key_values")
        elif len(decoder_outputs) > 1:
            past_key_values = decoder_outputs[1]
        embeddings = decoder_outputs[0].transpose(1, 2)
        outputs = self.decoder(embeddings)
        return outputs, past_key_values

    def decode(
        self,
        audio_codes: torch.Tensor,
        padding_mask: Optional[torch.Tensor] = None,
        decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], MimiDecoderOutput]:
        """
        Decodes the given frames into an output audio waveform.

        Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be
        trimmed.

        Args:
            audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
                Discrete code embeddings computed using `model.encode`.
            padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
                Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
                or 0 for *masked*.
            decoder_past_key_values (`Cache`, *optional*):
                Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up
                sequential decoding of the decoder transformer. This typically consists of the `past_key_values`
                returned by the model at a previous stage of decoding, when `use_cache=True` or
                `config.use_cache=True`.

                The model will output the same cache format that is fed as input.

                If `past_key_values` are used, the user can optionally input only the last `audio_values` or
                `audio_codes` (those that don't have their past key value states given to this model).
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        audio_values, decoder_past_key_values = self._decode_frame(
            audio_codes, past_key_values=decoder_past_key_values, return_dict=return_dict
        )

        # truncate based on padding mask
        if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:
            audio_values = audio_values[..., : padding_mask.shape[-1]]

        if not return_dict:
            return (
                audio_values,
                decoder_past_key_values,
            )
        return MimiDecoderOutput(audio_values, decoder_past_key_values)

    @add_start_docstrings_to_model_forward(MIMI_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=MimiOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: torch.Tensor,
        padding_mask: Optional[torch.Tensor] = None,
        num_quantizers: Optional[int] = None,
        audio_codes: Optional[torch.Tensor] = None,
        encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], MimiOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from datasets import load_dataset
        >>> from transformers import AutoFeatureExtractor, MimiModel

        >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
        >>> audio_sample = dataset["train"]["audio"][0]["array"]

        >>> model_id = "kyutai/mimi"
        >>> model = MimiModel.from_pretrained(model_id)
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)

        >>> inputs = feature_extractor(raw_audio=audio_sample, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> audio_codes = outputs.audio_codes
        >>> audio_values = outputs.audio_values
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if padding_mask is None:
            padding_mask = torch.ones_like(input_values).bool()

        if audio_codes is None:
            encoder_outputs = self.encode(
                input_values, padding_mask, num_quantizers, encoder_past_key_values, return_dict=return_dict
            )
            audio_codes = encoder_outputs[0]
            if return_dict:
                encoder_past_key_values = encoder_outputs.get("past_key_values")
            elif len(encoder_outputs) > 1:
                encoder_past_key_values = encoder_outputs[1]

        decoder_outputs = self.decode(audio_codes, padding_mask, decoder_past_key_values, return_dict=return_dict)
        audio_values = decoder_outputs[0]
        if return_dict:
            decoder_past_key_values = decoder_outputs.get("past_key_values")
        elif len(decoder_outputs) > 1:
            decoder_past_key_values = decoder_outputs[1]

        if not return_dict:
            return (audio_codes, audio_values, encoder_past_key_values, decoder_past_key_values)

        return MimiOutput(
            audio_codes=audio_codes,
            audio_values=audio_values,
            encoder_past_key_values=encoder_past_key_values,
            decoder_past_key_values=decoder_past_key_values,
        )
class_definition
71,249
83,223
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/mimi/modeling_mimi.py
null
3,698
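A hedged end-to-end sketch mirroring the docstring example above, but using the explicit `encode`/`decode` split with a reduced number of quantizers. It assumes the public `kyutai/mimi` checkpoint and uses one second of silence as a dummy waveform.

```python
import torch
from transformers import AutoFeatureExtractor, MimiModel

model = MimiModel.from_pretrained("kyutai/mimi")
feature_extractor = AutoFeatureExtractor.from_pretrained("kyutai/mimi")

waveform = torch.zeros(feature_extractor.sampling_rate)  # 1 s of silence as dummy input
inputs = feature_extractor(raw_audio=waveform.numpy(), return_tensors="pt")

encoder_outputs = model.encode(inputs["input_values"], num_quantizers=8)
decoder_outputs = model.decode(encoder_outputs.audio_codes)
print(encoder_outputs.audio_codes.shape)   # (batch, num_quantizers, frames)
print(decoder_outputs.audio_values.shape)  # (batch, channels, samples)
```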
class WavLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`WavLMModel`]. It is used to instantiate a WavLM
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the WavLM
    [microsoft/wavlm-base](https://huggingface.co/microsoft/wavlm-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the WavLM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`WavLMModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`WavLMForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        feat_extract_norm (`str`, *optional*, defaults to `"group"`):
            The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
            normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
            convolutional layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of the 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of the 1D convolutional positional embeddings layer.
        do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply the *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
            True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
            False` corresponds to applying layer norm after the attention layer.
        apply_spec_augment (`bool`, *optional*, defaults to `True`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
            [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Probability of each feature vector along the time axis to be chosen as the start of the vector span to be
            masked. Approximately `mask_time_prob * sequence_length // mask_time_length` feature vectors will be
            masked along the time axis. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespective of `mask_time_prob`. Only relevant if
            `mask_time_prob * len(time_axis) / mask_time_length < mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Probability of each feature vector along the feature axis to be chosen as the start of the vector span to
            be masked. Approximately `mask_feature_prob * hidden_size // mask_feature_length` feature vectors will be
            masked along the feature axis. This is only relevant if `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        num_codevectors_per_group (`int`, *optional*, defaults to 320):
            Number of entries in each quantization codebook (group).
        num_codevector_groups (`int`, *optional*, defaults to 2):
            Number of codevector groups for product codevector quantization.
        contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
            The temperature *kappa* in the contrastive loss.
        num_negatives (`int`, *optional*, defaults to 100):
            Number of negative samples for the contrastive loss.
        codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the quantized feature vectors.
        proj_codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the final projection of both the quantized and the transformer features.
        diversity_loss_weight (`float`, *optional*, defaults to 0.1):
            The weight of the codebook diversity loss component.
        ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`WavLMForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`WavLMForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`WavLMForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
        tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
            A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
            module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
        tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
            *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
        tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
            A tuple of integers defining the dilation factor of each 1D convolutional layer in the *TDNN* module of
            the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
        xvector_output_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the *XVector* embedding vectors.
        add_adapter (`bool`, *optional*, defaults to `False`):
            Whether a convolutional network should be stacked on top of the WavLM Encoder. Can be very useful for
            warm-starting WavLM for SpeechEncoderDecoder models.
        adapter_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        adapter_stride (`int`, *optional*, defaults to 2):
            Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        num_adapter_layers (`int`, *optional*, defaults to 3):
            Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter
            is True`.
        output_hidden_size (`int`, *optional*):
            Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant
            if `add_adapter is True`.

    Example:

    ```python
    >>> from transformers import WavLMConfig, WavLMModel

    >>> # Initializing a WavLM facebook/wavlm-base-960h style configuration
    >>> configuration = WavLMConfig()

    >>> # Initializing a model (with random weights) from the facebook/wavlm-base-960h style configuration
    >>> model = WavLMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
class_definition
861
18,535
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/wavlm/configuration_wavlm.py
null
3,699
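A quick check of the convolution-spec validation in `WavLMConfig.__init__` above: mismatched list lengths raise the `ValueError` constructed there.

```python
from transformers import WavLMConfig

try:
    # conv_dim has 2 entries while conv_stride and conv_kernel have 3.
    WavLMConfig(conv_dim=(512, 512), conv_stride=(5, 2, 2), conv_kernel=(10, 3, 3))
except ValueError as err:
    print(err)
```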