| text | type | start | end | depth | filepath | parent_class | class_index |
|---|---|---|---|---|---|---|---|
| stringlengths 31–243k | stringclasses 1 value | int64 36–275k | int64 286–280k | int64 0–1 | stringlengths 85–188 | stringclasses 3 values | int64 0–10.8k |
class OmDetTurboTaskEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.mlp = OmDetTurboMLPWithDropout(config)
self.res1 = OmDetTurboResidualLayer(config)
def forward(self, x):
mlp_out = self.mlp(x)
x = self.res1(x, mlp_out)
return x
| class_definition | 43,485 | 43,795 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | null | 3,400 |
class OmDetTurboDeformableTransformerDecoderLayer(nn.Module):
"""
A single layer of the Deformable Transformer Decoder.
"""
def __init__(self, config):
super().__init__()
# self attention
self.self_attn = OmDetTurboMultiheadAttention(
config,
hidden_size=config.decoder_hidden_dim,
num_attention_heads=config.decoder_num_heads,
dropout=config.decoder_dropout,
)
self.dropout1 = nn.Dropout(config.decoder_dropout)
self.norm1 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps)
# cross attention
self.cross_attn = OmDetTurboMultiscaleDeformableAttention(
config, num_heads=config.decoder_num_heads, n_points=config.decoder_num_points
)
self.dropout2 = nn.Dropout(config.decoder_dropout)
self.norm2 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps)
# feed forward network
self.linear1 = nn.Linear(config.decoder_hidden_dim, config.decoder_dim_feedforward)
self.act = ACT2FN[config.decoder_activation]
self.dropout3 = nn.Dropout(config.decoder_dropout)
self.linear2 = nn.Linear(config.decoder_dim_feedforward, config.decoder_hidden_dim)
self.dropout4 = nn.Dropout(config.decoder_dropout)
self.norm3 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(
self,
decoder_embeddings,
task_features,
reference_points,
vision_features,
vision_shapes,
vision_shapes_list,
level_start_index=None,
attention_mask=None,
padding_mask=None,
query_position=None,
output_attentions=None,
output_hidden_states=None,
):
output_attentions = output_attentions if output_attentions is not None else self.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states
origin_embedding_len = decoder_embeddings.shape[1]
# self attention
query = key = self.with_pos_embed(decoder_embeddings, query_position)
# combine task_features with query, key, value
task_features = task_features.transpose(0, 1)
query = torch.cat((query, task_features), dim=1)
key = torch.cat((key, task_features), dim=1)
decoder_embeddings = torch.cat((decoder_embeddings, task_features), dim=1)
outputs = self.self_attn(
query,
key,
decoder_embeddings,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
context, self_attention = outputs if output_attentions else (outputs[0], None)
decoder_embeddings = decoder_embeddings + self.dropout1(context)
decoder_embeddings = self.norm1(decoder_embeddings)
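        # split the fused sequence back into object-query embeddings and task features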
task_features = decoder_embeddings[:, origin_embedding_len:, :].transpose(0, 1)
decoder_embeddings = decoder_embeddings[:, :origin_embedding_len, :]
# cross attention
hidden_states = self.with_pos_embed(decoder_embeddings, query_position)
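        # add a feature-level dimension expected by the multi-scale deformable attention module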
reference_points = reference_points.unsqueeze(2)
outputs, cross_attention = self.cross_attn(
hidden_states=hidden_states,
attention_mask=padding_mask,
encoder_hidden_states=vision_features,
reference_points=reference_points,
spatial_shapes=vision_shapes,
spatial_shapes_list=vision_shapes_list,
level_start_index=level_start_index,
)
decoder_embeddings = decoder_embeddings + self.dropout2(outputs)
residual = self.norm2(decoder_embeddings)
# feed forward network
decoder_embeddings = self.linear2(self.dropout3(self.act(self.linear1(residual))))
decoder_embeddings = residual + self.dropout4(decoder_embeddings)
decoder_embeddings = self.norm3(decoder_embeddings)
return (
decoder_embeddings,
task_features,
self_attention if output_attentions else None,
cross_attention if output_attentions else None,
)
| class_definition | 43,798 | 48,248 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | null | 3,401 |
class OmDetTurboPreTrainedModel(PreTrainedModel):
config_class = OmDetTurboConfig
base_model_prefix = "model"
main_input_name = "pixel_values"
def _init_weights(self, module):
def linear_init_(module_to_init):
bound = 1 / math.sqrt(module_to_init.weight.shape[0])
nn.init.uniform_(module_to_init.weight, -bound, bound)
if hasattr(module_to_init, "bias") and module_to_init.bias is not None:
nn.init.uniform_(module_to_init.bias, -bound, bound)
if isinstance(module, OmDetTurboEncoderLayer):
linear_init_(module.fc1)
linear_init_(module.fc2)
elif isinstance(module, OmDetTurboDecoder):
nn.init.constant_(module.encoder_bbox_head.layers[-1].weight, 0.0)
nn.init.constant_(module.encoder_bbox_head.layers[-1].bias, 0.0)
for mlp in module.decoder_bbox_head:
nn.init.constant_(mlp.layers[-1].weight, 0.0)
nn.init.constant_(mlp.layers[-1].bias, 0.0)
linear_init_(module.encoder_vision_features[0])
nn.init.xavier_uniform_(module.encoder_vision_features[0].weight)
if module.learn_initial_query:
nn.init.xavier_uniform_(module.tgt_embed.weight)
nn.init.xavier_uniform_(module.query_position_head.layers[0].weight)
nn.init.xavier_uniform_(module.query_position_head.layers[1].weight)
for layer in module.channel_projection_layers:
nn.init.xavier_uniform_(layer[0].weight)
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.bias is not None:
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, OmDetTurboDecoder):
module.gradient_checkpointing = value
@staticmethod
def _get_cache_key_at_index(input_ids, attention_mask, index):
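        # the cache key is the tuple of non-padded token ids of this prompt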
input_ids = input_ids[index]
input_mask = attention_mask[index]
cache_key = tuple(input_ids[input_mask != 0].tolist())
return cache_key
def get_cached_class_embeddings(self, classes_input_ids, classes_attention_mask):
not_cached_index = []
not_cached_classes = []
total_embeddings = []
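        # look up each class prompt in the cache; only prompts that miss are re-encoded by the language backbone below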
for idx, _ in enumerate(classes_input_ids):
cache_key = self._get_cache_key_at_index(classes_input_ids, classes_attention_mask, idx)
if self.language_cache_class.has(cache_key):
total_embeddings.append(self.language_cache_class.get(cache_key))
else:
total_embeddings.append(None)
not_cached_index.append(idx)
not_cached_classes.append(cache_key)
if not_cached_classes:
not_cached_classes_ids = torch.stack([classes_input_ids[idx] for idx in not_cached_index])
embeddings = self.language_backbone(not_cached_classes_ids, encode_type="class")
for idx, emb in enumerate(embeddings):
idx_to_put = not_cached_index[idx]
total_embeddings[idx_to_put] = emb
self.language_cache_class.put(not_cached_classes[idx], emb)
total_class_embs = torch.stack(total_embeddings).to(self.device)
return total_class_embs
def get_cached_task_embeddings(self, tasks_input_ids, tasks_attention_mask):
not_cached_index = []
not_cached_tasks = []
total_task_features = []
total_task_masks = []
for idx, _ in enumerate(tasks_input_ids):
cache_key = self._get_cache_key_at_index(tasks_input_ids, tasks_attention_mask, idx)
if self.language_cache_prompt.has(cache_key):
task_feature, task_mask = self.language_cache_prompt.get(cache_key)
total_task_features.append(task_feature)
total_task_masks.append(task_mask)
else:
total_task_features.append(None)
total_task_masks.append(None)
not_cached_index.append(idx)
not_cached_tasks.append(cache_key)
if not_cached_tasks:
not_cached_index_ids = torch.stack([tasks_input_ids[idx] for idx in not_cached_index])
not_cached_mask = torch.stack([tasks_attention_mask[idx] for idx in not_cached_index])
embeddings, masks = self.language_backbone(not_cached_index_ids, mask=not_cached_mask, encode_type="task")
for idx in range(embeddings.shape[1]):
emb = embeddings[:, [idx], :]
idx_to_put = not_cached_index[idx]
cur_mask = torch.unsqueeze(masks[idx], dim=0).to(self.device)
total_task_features[idx_to_put] = emb
total_task_masks[idx_to_put] = cur_mask
self.language_cache_prompt.put(not_cached_tasks[idx], (emb, cur_mask))
# pad before concat if needed
max_len = max([task.shape[0] for task in total_task_features])
for idx, task in enumerate(total_task_features):
if task.shape[0] < max_len:
pad_size = max_len - task.shape[0]
total_task_features[idx] = F.pad(task, (0, 0, 0, 0, 0, pad_size))
total_task_masks[idx] = F.pad(total_task_masks[idx], (0, pad_size))
total_task_features = torch.cat(total_task_features, dim=1).to(self.device)
total_task_masks = torch.cat(total_task_masks, dim=0).to(self.device)
return total_task_features, total_task_masks
def get_language_embedding(
self,
classes_input_ids,
classes_attention_mask,
tasks_input_ids,
tasks_attention_mask,
classes_structure,
):
batched_classes_embeddings = self.get_cached_class_embeddings(classes_input_ids, classes_attention_mask)
# regroup class embeddings using saved structure
max_class_size = torch.max(classes_structure)
class_embeddings_regrouped = []
start = 0
for size in classes_structure:
pad_size = max_class_size - size
class_embeddings_regrouped.append(
F.pad(batched_classes_embeddings[start : start + size], (0, 0, 0, pad_size)).unsqueeze(1)
)
start += size
class_embeddings = torch.cat(class_embeddings_regrouped, dim=1)
task_embeddings, task_mask = self.get_cached_task_embeddings(tasks_input_ids, tasks_attention_mask)
return class_embeddings, task_embeddings, task_mask
| class_definition | 48,251 | 54,867 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | null | 3,402 |
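The two language caches used above (`language_cache_class` and `language_cache_prompt`) let repeated class and task prompts skip the language backbone. Below is a minimal sketch of that caching pattern using a hypothetical `SimpleLRUCache` stand-in; the actual `OmDetTurboLRUCache` implementation is not part of this dump and may differ.

```python
from collections import OrderedDict


class SimpleLRUCache:
    """Hypothetical stand-in for OmDetTurboLRUCache: keeps only the most recently used entries."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self._store = OrderedDict()

    def has(self, key) -> bool:
        return key in self._store

    def get(self, key):
        self._store.move_to_end(key)  # mark as most recently used
        return self._store[key]

    def put(self, key, value):
        self._store[key] = value
        self._store.move_to_end(key)
        if len(self._store) > self.capacity:
            self._store.popitem(last=False)  # evict the least recently used entry
```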
class OmDetTurboDecoder(OmDetTurboPreTrainedModel):
def __init__(self, config: OmDetTurboConfig):
self.config = config
super().__init__(config)
self.gradient_checkpointing = False
hidden_dim = config.decoder_hidden_dim
self.num_queries = config.num_queries
self.class_distance_type = config.class_distance_type
self.learn_initial_query = config.learn_initial_query
# backbone feature projection
self.channel_projection_layers = nn.ModuleList(
nn.Sequential(nn.Conv2d(x, hidden_dim, 1, bias=False), nn.BatchNorm2d(hidden_dim))
for x in config.vision_features_channels
)
self.task_encoder = OmDetTurboTaskEncoder(config)
if config.class_embed_dim != hidden_dim:
self.task_project = nn.Linear(config.class_embed_dim, hidden_dim)
# Transformer module
self.layers = nn.ModuleList(
[OmDetTurboDeformableTransformerDecoderLayer(config) for _ in range(config.decoder_num_layers)]
)
self.decoder_num_layers = config.decoder_num_layers
# decoder embedding
if self.learn_initial_query:
self.tgt_embed = nn.Embedding(self.num_queries, hidden_dim)
self.query_position_head = OmDetTurboMLP(
input_dim=4, hidden_dim=2 * hidden_dim, output_dim=hidden_dim, num_layers=2
)
# encoder head
self.encoder_vision_features = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim), nn.LayerNorm(hidden_dim, eps=config.layer_norm_eps)
)
self.encoder_class_head = nn.Linear(config.class_embed_dim, hidden_dim)
self.encoder_bbox_head = OmDetTurboMLP(input_dim=hidden_dim, hidden_dim=hidden_dim, output_dim=4, num_layers=3)
# decoder head
self.decoder_class_head = nn.ModuleList(
[nn.Linear(config.class_embed_dim, hidden_dim) for _ in range(config.decoder_num_layers)]
)
self.decoder_bbox_head = nn.ModuleList(
[OmDetTurboMLP(hidden_dim, hidden_dim, 4, num_layers=3) for _ in range(config.decoder_num_layers)]
)
# Initialize weights and apply final processing
self.post_init()
@lru_cache(maxsize=32)
def generate_anchors(self, spatial_shapes=None, grid_size=0.05, device="cpu", dtype=torch.float32):
# We always generate anchors in float32 to preserve equivalence between
# dynamic and static anchor inference
# Ignore copy
if spatial_shapes is None:
raise ValueError("spatial_shapes must be provided")
anchors = []
for level, (height, width) in enumerate(spatial_shapes):
grid_y, grid_x = torch.meshgrid(
torch.arange(end=height, dtype=dtype, device=device),
torch.arange(end=width, dtype=dtype, device=device),
indexing="ij",
)
grid_xy = torch.stack([grid_x, grid_y], -1)
valid_wh = torch.tensor([width, height], dtype=dtype, device=device)
grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_wh
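            # anchor width/height double at every feature level: grid_size * 2**level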
wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**level)
anchors.append(torch.concat([grid_xy, wh], -1).reshape(-1, height * width, 4))
# define the valid range for anchor coordinates
eps = 1e-2
anchors = torch.concat(anchors, 1)
valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True)
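        # move anchors to inverse-sigmoid (logit) space so predicted box offsets can be added before a final sigmoid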
anchors = torch.log(anchors / (1 - anchors))
anchors = torch.where(valid_mask, anchors, torch.inf)
return anchors, valid_mask
def _get_encoder_input(self, vision_features):
# get projection features
vision_features = [self.channel_projection_layers[i](feat) for i, feat in enumerate(vision_features)]
# get encoder inputs
new_vision_features = []
new_vision_shapes_list = []
for feat in vision_features:
height, width = feat.shape[2:]
# [batch_size, channels, height, width] -> [batch_size, height*width, channels]
new_vision_features.append(feat.flatten(2).permute(0, 2, 1))
# [num_feature_levels, 2]
new_vision_shapes_list.append((height, width))
# [batch_size, height*width, channels]
new_vision_features = torch.cat(new_vision_features, 1)
new_vision_shapes = torch.tensor(new_vision_shapes_list, dtype=torch.int64).to(vision_features[0].device)
level_start_index = torch.cat((new_vision_shapes.new_zeros((1,)), new_vision_shapes.prod(1).cumsum(0)[:-1]))
return new_vision_features, new_vision_shapes, new_vision_shapes_list, level_start_index
def _get_decoder_input(
self, vision_features, vision_shapes, class_features, denoise_embeddings=None, denoise_bboxes=None
):
batch_size = len(vision_features)
# prepare input for decoder
anchors, valid_mask = self.generate_anchors(
vision_shapes, device=vision_features.device, dtype=vision_features.dtype
)
predicted_class_features = self.encoder_vision_features(
torch.where(
valid_mask, vision_features, torch.tensor(0.0, dtype=vision_features.dtype).to(vision_features.device)
)
)
original_class_projected = self.encoder_class_head(class_features).permute(1, 2, 0)
encoder_class_similarity = get_class_similarity(
self.class_distance_type, predicted_class_features, original_class_projected
)
# dynamic anchors + static content
# (batch_size, height*width, 4)
encoder_outputs_bboxes = self.encoder_bbox_head(predicted_class_features) + anchors
# query selection
# (batch_size, num_queries)
topk_ind = torch.topk(encoder_class_similarity.max(-1).values, self.num_queries, dim=1).indices.view(-1)
# (batch_size, num_queries)
batch_ind = (
torch.arange(end=batch_size, dtype=topk_ind.dtype, device=topk_ind.device)
.unsqueeze(-1)
.repeat(1, self.num_queries)
.view(-1)
)
reference_points = encoder_outputs_bboxes[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
encoder_bboxes = reference_points.sigmoid()
if denoise_bboxes is not None:
reference_points = torch.cat([denoise_bboxes, reference_points], 1)
if self.training:
reference_points = reference_points.detach()
encoder_class_similarity = encoder_class_similarity[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
if self.learn_initial_query:
embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(batch_size, 1, 1)
else:
embeddings = predicted_class_features[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
if self.training:
embeddings = embeddings.detach()
if denoise_embeddings is not None:
embeddings = torch.cat([denoise_embeddings, embeddings], 1)
return embeddings, reference_points, encoder_bboxes, encoder_class_similarity, anchors
def forward(
self,
vision_features,
class_features,
task_features,
task_mask,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
"""
Args:
vision_features (`torch.FloatTensor`): The sequence of vision features. shape depends on the vision
backbone.
class_features (`torch.FloatTensor`): The sequence of class features of shape
`(class_sequence_length, batch_size, class_embed_dim)`.
task_features (`torch.FloatTensor`): The sequence of task features of shape
`(task_sequence_length, batch_size, decoder_hidden_dim)`.
task_mask (`torch.LongTensor`): The mask for the task features of shape `(batch_size, task_sequence_length)`.
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention
layers. See `attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See
`hidden_states` under returned tensors for more detail.
return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain
tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_features, vision_shapes, vision_shapes_list, level_start_index = self._get_encoder_input(
vision_features
)
# todo add denoising for training
denoise_embeddings, denoise_bboxes, key_padding_mask = None, None, None
batch_size = task_mask.shape[0]
# compose attn_mask for vision_emb and task_emb fusion
task_features = self.task_encoder(task_features)
if self.task_project is not None:
task_features = self.task_project(task_features)
src_key_mask = (task_mask == 0).detach()
attn_mask_len = self.num_queries
fusion_size = attn_mask_len + task_features.shape[0]
key_padding_mask = torch.zeros([batch_size, fusion_size], dtype=torch.bool).to(task_features.device)
key_padding_mask[:, attn_mask_len:] = src_key_mask
attention_mask = _prepare_4d_attention_mask(~key_padding_mask, dtype=vision_features.dtype)
decoder_embeddings, reference_points, encoder_bboxes, encoder_class_similarity, init_reference_points = (
self._get_decoder_input(
vision_features, tuple(vision_shapes_list), class_features, denoise_embeddings, denoise_bboxes
)
)
all_hidden_states = () if output_hidden_states else None
all_attns = () if output_attentions else None
all_self_attns = () if output_attentions else None
all_cross_attns = () if output_attentions else None
predicted_class_features = decoder_embeddings
if output_hidden_states:
all_hidden_states = all_hidden_states + (predicted_class_features,)
decoder_bboxes = []
decoder_classes = []
last_refined_bbox = None
reference_points = reference_points.sigmoid()
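        # each decoder layer refines the previous layer's boxes by predicting an offset in logit space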
for i, layer in enumerate(self.layers):
if self.gradient_checkpointing and self.training:
predicted_class_features, task_features, self_attention, cross_attention = (
self._gradient_checkpointing_func(
layer.__call__,
predicted_class_features,
task_features,
reference_points,
vision_features,
vision_shapes,
vision_shapes_list,
level_start_index=level_start_index,
attention_mask=attention_mask,
query_position=self.query_position_head(reference_points),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
)
else:
predicted_class_features, task_features, self_attention, cross_attention = layer(
predicted_class_features,
task_features,
reference_points,
vision_features,
vision_shapes,
vision_shapes_list,
level_start_index=level_start_index,
attention_mask=attention_mask,
query_position=self.query_position_head(reference_points),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if output_attentions:
all_self_attns = all_self_attns + (self_attention,)
all_cross_attns = all_cross_attns + (cross_attention,)
if output_hidden_states:
all_hidden_states = all_hidden_states + (predicted_class_features,)
refined_bbox = torch.sigmoid(
self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(reference_points)
)
original_class_projected = self.decoder_class_head[i](class_features).permute(1, 2, 0)
if self.training:
decoder_classes.append(
get_class_similarity(
class_distance_type=self.class_distance_type,
cls_feature=predicted_class_features,
class_proj=original_class_projected,
)
)
if i == 0:
decoder_bboxes.append(refined_bbox)
else:
decoder_bboxes.append(
torch.sigmoid(
self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(last_refined_bbox)
)
)
elif i == self.decoder_num_layers - 1:
decoder_classes.append(
get_class_similarity(self.class_distance_type, predicted_class_features, original_class_projected)
)
decoder_bboxes.append(refined_bbox)
break
last_refined_bbox = refined_bbox
reference_points = refined_bbox.detach() if self.training else refined_bbox
if output_attentions:
all_attns += (all_self_attns, all_cross_attns)
last_hidden_state = predicted_class_features
decoder_bboxes = torch.stack(decoder_bboxes)
decoder_classes = torch.stack(decoder_classes)
if not return_dict:
return (
last_hidden_state,
all_hidden_states,
all_attns,
decoder_bboxes,
decoder_classes,
encoder_bboxes,
encoder_class_similarity,
init_reference_points,
reference_points,
)
return OmDetTurboDecoderOutput(
last_hidden_state=last_hidden_state,
hidden_states=all_hidden_states,
attentions=all_attns,
decoder_coords=decoder_bboxes,
decoder_classes=decoder_classes,
encoder_coord_logits=encoder_bboxes,
encoder_class_logits=encoder_class_similarity,
init_reference_points=init_reference_points,
intermediate_reference_points=reference_points,
)
| class_definition | 59,202 | 74,316 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | null | 3,403 |
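The decoder above refines boxes in logit space: each layer adds a predicted offset to the inverse sigmoid of the previous reference boxes and squashes the result back with a sigmoid. The identity behind that refinement step is sketched below; `inverse_sigmoid` here is an illustrative re-implementation, not the module's own `_inverse_sigmoid` helper.

```python
import torch


def inverse_sigmoid(x, eps=1e-5):
    # clamp away from 0 and 1, then apply the logit transform
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))


boxes = torch.tensor([[0.20, 0.30, 0.10, 0.15]])  # normalized (cx, cy, w, h)
zero_offset = torch.zeros_like(boxes)  # a zero predicted offset leaves the boxes unchanged
refined = torch.sigmoid(zero_offset + inverse_sigmoid(boxes))
print(torch.allclose(refined, boxes, atol=1e-4))  # True
```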
class OmDetTurboForObjectDetection(OmDetTurboPreTrainedModel):
def __init__(self, config: OmDetTurboConfig):
super().__init__(config)
self.vision_backbone = OmDetTurboVisionBackbone(config)
self.language_backbone = OmDetTurboLanguageBackbone(config)
self.encoder = OmDetTurboHybridEncoder(config)
self.decoder = OmDetTurboDecoder(config)
self.num_queries = config.num_queries
self.language_cache_class = OmDetTurboLRUCache(config.cache_size)
self.language_cache_prompt = OmDetTurboLRUCache(config.cache_size)
self.vocab_size = config.text_config.vocab_size
self.post_init()
def get_input_embeddings(self):
return self.language_backbone.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_backbone.model.set_input_embeddings(value)
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
model_embeds = self.language_backbone.model.resize_token_embeddings(
new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of
)
self.config.text_config.vocab_size = model_embeds.num_embeddings
self.vocab_size = model_embeds.num_embeddings
return model_embeds
@add_start_docstrings_to_model_forward(OMDET_TURBO_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=OmDetTurboObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values: torch.FloatTensor,
classes_input_ids: torch.LongTensor,
classes_attention_mask: torch.LongTensor,
tasks_input_ids: torch.LongTensor,
tasks_attention_mask: torch.LongTensor,
classes_structure: torch.LongTensor,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], OmDetTurboObjectDetectionOutput]:
r"""
Returns:
Examples:
```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoProcessor, OmDetTurboForObjectDetection
>>> processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> classes = ["cat", "remote"]
>>> task = "Detect {}.".format(", ".join(classes))
>>> inputs = processor(image, text=classes, task=task, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # convert outputs (bounding boxes and class logits)
>>> results = processor.post_process_grounded_object_detection(
... outputs,
... classes=classes,
... target_sizes=[image.size[::-1]],
... score_threshold=0.3,
... nms_threshold=0.3,
>>> )[0]
>>> for score, class_name, box in zip(results["scores"], results["classes"], results["boxes"]):
... box = [round(i, 1) for i in box.tolist()]
... print(
... f"Detected {class_name} with confidence "
... f"{round(score.item(), 2)} at location {box}"
... )
Detected remote with confidence 0.76 at location [39.9, 71.3, 176.5, 117.9]
Detected cat with confidence 0.72 at location [345.1, 22.5, 639.7, 371.9]
Detected cat with confidence 0.65 at location [12.7, 53.8, 315.5, 475.3]
Detected remote with confidence 0.57 at location [333.4, 75.6, 370.7, 187.0]
```"""
if labels is not None:
raise NotImplementedError("Training is not implemented yet")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
image_features = self.vision_backbone(pixel_values)
encoder_outputs = self.encoder(
image_features,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
class_features, task_features, task_mask = self.get_language_embedding(
classes_input_ids,
classes_attention_mask,
tasks_input_ids,
tasks_attention_mask,
classes_structure,
)
encoder_extracted_states = encoder_outputs.extracted_states if return_dict else encoder_outputs[-1]
decoder_outputs = self.decoder(
encoder_extracted_states,
class_features,
task_features,
task_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return tuple(
output
for output in [
loss,
decoder_outputs[3][-1],
decoder_outputs[4][-1],
decoder_outputs[7],
decoder_outputs[8],
decoder_outputs[5],
decoder_outputs[6],
encoder_outputs[-1],
decoder_outputs[1],
decoder_outputs[2],
encoder_outputs[1],
encoder_outputs[2],
classes_structure,
]
if output is not None
)
return OmDetTurboObjectDetectionOutput(
loss=loss,
decoder_coord_logits=decoder_outputs.decoder_coords[-1],
decoder_class_logits=decoder_outputs.decoder_classes[-1],
init_reference_points=decoder_outputs.init_reference_points,
intermediate_reference_points=decoder_outputs.intermediate_reference_points,
encoder_coord_logits=decoder_outputs.encoder_coord_logits,
encoder_class_logits=decoder_outputs.encoder_class_logits,
encoder_extracted_states=encoder_outputs.extracted_states,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
classes_structure=classes_structure,
)
| class_definition | 74,577 | 81,438 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | null | 3,404 |
class OmDetTurboConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`OmDetTurboForObjectDetection`].
    It is used to instantiate an OmDet-Turbo model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the OmDet-Turbo
[omlab/omdet-turbo-swin-tiny-hf](https://huggingface.co/omlab/omdet-turbo-swin-tiny-hf) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`PretrainedConfig`, *optional*):
The configuration of the text backbone.
backbone_config (`PretrainedConfig`, *optional*):
The configuration of the vision backbone.
use_timm_backbone (`bool`, *optional*, defaults to `True`):
            Whether to use the timm library for the vision backbone.
backbone (`str`, *optional*, defaults to `"swin_tiny_patch4_window7_224"`):
            The name of the pretrained vision backbone to use. If `use_pretrained_backbone=False`, a randomly
            initialized backbone with the same architecture as `backbone` is used.
backbone_kwargs (`dict`, *optional*):
Additional kwargs for the vision backbone.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use a pretrained vision backbone.
apply_layernorm_after_vision_backbone (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization on the feature maps of the vision backbone output.
image_size (`int`, *optional*, defaults to 640):
The size (resolution) of each image.
disable_custom_kernels (`bool`, *optional*, defaults to `False`):
Whether to disable custom kernels.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for layer normalization.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for batch normalization.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
text_projection_in_dim (`int`, *optional*, defaults to 512):
The input dimension for the text projection.
text_projection_out_dim (`int`, *optional*, defaults to 512):
The output dimension for the text projection.
task_encoder_hidden_dim (`int`, *optional*, defaults to 1024):
The feedforward dimension for the task encoder.
class_embed_dim (`int`, *optional*, defaults to 512):
The dimension of the classes embeddings.
class_distance_type (`str`, *optional*, defaults to `"cosine"`):
            The type of distance used to compare predicted classes to projected class embeddings.
Can be `"cosine"` or `"dot"`.
num_queries (`int`, *optional*, defaults to 900):
The number of queries.
csp_activation (`str`, *optional*, defaults to `"silu"`):
The activation function of the Cross Stage Partial (CSP) networks of the encoder.
conv_norm_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function of the ConvNormLayer layers of the encoder.
encoder_feedforward_activation (`str`, *optional*, defaults to `"relu"`):
The activation function for the feedforward network of the encoder.
encoder_feedforward_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate following the activation of the encoder feedforward network.
encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate of the encoder multi-head attention module.
hidden_expansion (`int`, *optional*, defaults to 1):
The hidden expansion of the CSP networks in the encoder.
vision_features_channels (`tuple(int)`, *optional*, defaults to `[256, 256, 256]`):
The projected vision features channels used as inputs for the decoder.
encoder_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the encoder.
encoder_in_channels (`List(int)`, *optional*, defaults to `[192, 384, 768]`):
The input channels for the encoder.
encoder_projection_indices (`List(int)`, *optional*, defaults to `[2]`):
            The indices of the input features projected by each layer.
encoder_attention_heads (`int`, *optional*, defaults to 8):
The number of attention heads for the encoder.
encoder_dim_feedforward (`int`, *optional*, defaults to 2048):
The feedforward dimension for the encoder.
encoder_layers (`int`, *optional*, defaults to 1):
The number of layers in the encoder.
positional_encoding_temperature (`int`, *optional*, defaults to 10000):
The positional encoding temperature in the encoder.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of feature levels for the multi-scale deformable attention module of the decoder.
decoder_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the decoder.
decoder_num_heads (`int`, *optional*, defaults to 8):
The number of heads for the decoder.
decoder_num_layers (`int`, *optional*, defaults to 6):
The number of layers for the decoder.
decoder_activation (`str`, *optional*, defaults to `"relu"`):
The activation function for the decoder.
decoder_dim_feedforward (`int`, *optional*, defaults to 2048):
The feedforward dimension for the decoder.
decoder_num_points (`int`, *optional*, defaults to 4):
The number of points sampled in the decoder multi-scale deformable attention module.
decoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate for the decoder.
eval_size (`Tuple[int, int]`, *optional*):
            Height and width used to compute the effective height and width of the position embeddings after taking
into account the stride (see RTDetr).
learn_initial_query (`bool`, *optional*, defaults to `False`):
Whether to learn the initial query.
cache_size (`int`, *optional*, defaults to 100):
The cache size for the classes and prompts caches.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder-decoder model or not.
kwargs (`Dict[str, Any]`, *optional*):
Additional parameters from the architecture. The values in kwargs will be saved as part of the configuration
and can be used to control the model outputs.
Examples:
```python
>>> from transformers import OmDetTurboConfig, OmDetTurboForObjectDetection
>>> # Initializing a OmDet-Turbo omlab/omdet-turbo-swin-tiny-hf style configuration
>>> configuration = OmDetTurboConfig()
>>> # Initializing a model (with random weights) from the omlab/omdet-turbo-swin-tiny-hf style configuration
>>> model = OmDetTurboForObjectDetection(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "omdet-turbo"
attribute_map = {
"encoder_hidden_dim": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
text_config=None,
backbone_config=None,
use_timm_backbone=True,
backbone="swin_tiny_patch4_window7_224",
backbone_kwargs=None,
use_pretrained_backbone=False,
apply_layernorm_after_vision_backbone=True,
image_size=640,
disable_custom_kernels=False,
layer_norm_eps=1e-5,
batch_norm_eps=1e-5,
init_std=0.02,
text_projection_in_dim=512,
text_projection_out_dim=512,
task_encoder_hidden_dim=1024,
class_embed_dim=512,
class_distance_type="cosine",
num_queries=900,
csp_activation="silu",
conv_norm_activation="gelu",
encoder_feedforward_activation="relu",
encoder_feedforward_dropout=0.0,
encoder_dropout=0.0,
hidden_expansion=1,
vision_features_channels=[256, 256, 256],
encoder_hidden_dim=256,
encoder_in_channels=[192, 384, 768],
encoder_projection_indices=[2],
encoder_attention_heads=8,
encoder_dim_feedforward=2048,
encoder_layers=1,
positional_encoding_temperature=10000,
num_feature_levels=3,
decoder_hidden_dim=256,
decoder_num_heads=8,
decoder_num_layers=6,
decoder_activation="relu",
decoder_dim_feedforward=2048,
decoder_num_points=4,
decoder_dropout=0.0,
eval_size=None,
learn_initial_query=False,
cache_size=100,
is_encoder_decoder=True,
**kwargs,
):
if use_timm_backbone:
if backbone_config is None:
backbone_kwargs = {
"out_indices": [1, 2, 3],
"img_size": image_size,
"always_partition": True,
}
elif backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `swin` vision config.")
backbone_config = CONFIG_MAPPING["swin"](
window_size=7,
image_size=image_size,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
out_indices=[2, 3, 4],
)
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
if text_config is None:
logger.info(
"`text_config` is `None`. Initializing the config with the default `clip_text_model` text config."
)
text_config = CONFIG_MAPPING["clip_text_model"]()
elif isinstance(text_config, dict):
text_model_type = text_config.get("model_type")
text_config = CONFIG_MAPPING[text_model_type](**text_config)
if class_distance_type not in ["cosine", "dot"]:
raise ValueError(
f"Invalid `class_distance_type`. It should be either `cosine` or `dot`, but got {class_distance_type}."
)
self.text_config = text_config
self.backbone_config = backbone_config
self.use_timm_backbone = use_timm_backbone
self.backbone = backbone
self.backbone_kwargs = backbone_kwargs
self.use_pretrained_backbone = use_pretrained_backbone
self.apply_layernorm_after_vision_backbone = apply_layernorm_after_vision_backbone
self.image_size = image_size
self.disable_custom_kernels = disable_custom_kernels
self.layer_norm_eps = layer_norm_eps
self.batch_norm_eps = batch_norm_eps
self.init_std = init_std
self.text_projection_in_dim = text_projection_in_dim
self.text_projection_out_dim = text_projection_out_dim
self.task_encoder_hidden_dim = task_encoder_hidden_dim
self.class_embed_dim = class_embed_dim
self.class_distance_type = class_distance_type
self.num_queries = num_queries
self.csp_activation = csp_activation
self.conv_norm_activation = conv_norm_activation
self.encoder_feedforward_activation = encoder_feedforward_activation
self.encoder_feedforward_dropout = encoder_feedforward_dropout
self.encoder_dropout = encoder_dropout
self.hidden_expansion = hidden_expansion
self.vision_features_channels = vision_features_channels
self.encoder_hidden_dim = encoder_hidden_dim
self.encoder_in_channels = encoder_in_channels
self.encoder_projection_indices = encoder_projection_indices
self.encoder_attention_heads = encoder_attention_heads
self.encoder_dim_feedforward = encoder_dim_feedforward
self.encoder_layers = encoder_layers
self.positional_encoding_temperature = positional_encoding_temperature
self.num_feature_levels = num_feature_levels
self.decoder_hidden_dim = decoder_hidden_dim
self.decoder_num_heads = decoder_num_heads
self.decoder_num_layers = decoder_num_layers
self.decoder_activation = decoder_activation
self.decoder_dim_feedforward = decoder_dim_feedforward
self.decoder_num_points = decoder_num_points
self.decoder_dropout = decoder_dropout
self.eval_size = eval_size
self.learn_initial_query = learn_initial_query
self.cache_size = cache_size
self.is_encoder_decoder = is_encoder_decoder
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
| class_definition | 891 | 14,445 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py | null | 3,405 |
class EnglishNormalizer:
def __init__(self):
# List of (regular expression, replacement) pairs for abbreviations:
self._abbreviations = [
(re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
for x in [
("mrs", "misess"),
("mr", "mister"),
("dr", "doctor"),
("st", "saint"),
("co", "company"),
("jr", "junior"),
("maj", "major"),
("gen", "general"),
("drs", "doctors"),
("rev", "reverend"),
("lt", "lieutenant"),
("hon", "honorable"),
("sgt", "sergeant"),
("capt", "captain"),
("esq", "esquire"),
("ltd", "limited"),
("col", "colonel"),
("ft", "fort"),
]
]
self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
self.teens = [
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]
self.tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
def number_to_words(self, num: int) -> str:
"""
        Converts numbers (`int`) to words (`str`).
        Please note that it only supports values up to `number_to_words(999_999_999_999_999_999)`, i.e. "nine hundred
        ninety-nine quadrillion, nine hundred ninety-nine trillion, nine hundred ninety-nine billion, nine hundred
        ninety-nine million, nine hundred ninety-nine thousand, nine hundred ninety-nine".
"""
if num == 0:
return "zero"
elif num < 0:
return "minus " + self.number_to_words(abs(num))
elif num < 10:
return self.ones[num]
elif num < 20:
return self.teens[num - 10]
elif num < 100:
return self.tens[num // 10] + ("-" + self.number_to_words(num % 10) if num % 10 != 0 else "")
elif num < 1000:
return (
self.ones[num // 100] + " hundred" + (" " + self.number_to_words(num % 100) if num % 100 != 0 else "")
)
elif num < 1_000_000:
return (
self.number_to_words(num // 1000)
+ " thousand"
+ (", " + self.number_to_words(num % 1000) if num % 1000 != 0 else "")
)
elif num < 1_000_000_000:
return (
self.number_to_words(num // 1_000_000)
+ " million"
+ (", " + self.number_to_words(num % 1_000_000) if num % 1_000_000 != 0 else "")
)
elif num < 1_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000)
+ " billion"
+ (", " + self.number_to_words(num % 1_000_000_000) if num % 1_000_000_000 != 0 else "")
)
elif num < 1_000_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000_000)
+ " trillion"
+ (", " + self.number_to_words(num % 1_000_000_000_000) if num % 1_000_000_000_000 != 0 else "")
)
elif num < 1_000_000_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000_000_000)
+ " quadrillion"
+ (
", " + self.number_to_words(num % 1_000_000_000_000_000)
if num % 1_000_000_000_000_000 != 0
else ""
)
)
else:
return "number out of range"
def convert_to_ascii(self, text: str) -> str:
"""
Converts unicode to ascii
"""
return text.encode("ascii", "ignore").decode("utf-8")
def _expand_dollars(self, m: str) -> str:
"""
This method is used to expand numerical dollar values into spoken words.
"""
match = m.group(1)
parts = match.split(".")
if len(parts) > 2:
return match + " dollars" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = "dollar" if dollars == 1 else "dollars"
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = "dollar" if dollars == 1 else "dollars"
return "%s %s" % (dollars, dollar_unit)
elif cents:
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s" % (cents, cent_unit)
else:
return "zero dollars"
def _remove_commas(self, m: str) -> str:
"""
This method is used to remove commas from sentences.
"""
return m.group(1).replace(",", "")
def _expand_decimal_point(self, m: str) -> str:
"""
This method is used to expand '.' into spoken word ' point '.
"""
return m.group(1).replace(".", " point ")
def _expand_ordinal(self, num: str) -> str:
"""
This method is used to expand ordinals such as '1st', '2nd' into spoken words.
"""
ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"}
num = int(num.group(0)[:-2])
        if 10 <= num % 100 <= 20:
suffix = "th"
else:
suffix = ordinal_suffixes.get(num % 10, "th")
return self.number_to_words(num) + suffix
def _expand_number(self, m: str) -> str:
"""
This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository,
link :
https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86)
"""
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return "two thousand"
elif num > 2000 and num < 2010:
return "two thousand " + self.number_to_words(num % 100)
elif num % 100 == 0:
return self.number_to_words(num // 100) + " hundred"
else:
return self.number_to_words(num)
else:
return self.number_to_words(num)
def normalize_numbers(self, text: str) -> str:
"""
This method is used to normalize numbers within a text such as converting the numbers to words, removing
commas, etc.
"""
text = re.sub(re.compile(r"([0-9][0-9\,]+[0-9])"), self._remove_commas, text)
text = re.sub(re.compile(r"£([0-9\,]*[0-9]+)"), r"\1 pounds", text)
text = re.sub(re.compile(r"\$([0-9\.\,]*[0-9]+)"), self._expand_dollars, text)
text = re.sub(re.compile(r"([0-9]+\.[0-9]+)"), self._expand_decimal_point, text)
text = re.sub(re.compile(r"[0-9]+(st|nd|rd|th)"), self._expand_ordinal, text)
text = re.sub(re.compile(r"[0-9]+"), self._expand_number, text)
return text
def expand_abbreviations(self, text: str) -> str:
"""
Expands the abbreviate words.
"""
for regex, replacement in self._abbreviations:
text = re.sub(regex, replacement, text)
return text
def collapse_whitespace(self, text: str) -> str:
"""
Removes multiple whitespaces
"""
return re.sub(re.compile(r"\s+"), " ", text)
def __call__(self, text):
"""
Converts text to ascii, numbers / number-like quantities to their spelt-out counterparts and expands
abbreviations
"""
text = self.convert_to_ascii(text)
text = text.lower()
text = self.normalize_numbers(text)
text = self.expand_abbreviations(text)
text = self.collapse_whitespace(text)
text = text.replace('"', "")
return text
| class_definition | 660 | 8,855 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/number_normalizer.py | null | 3,406 |
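A short usage sketch of the `EnglishNormalizer` defined above, assuming the class (and its module-level `re` import) is in scope; the sample sentence is purely illustrative.

```python
normalizer = EnglishNormalizer()

# lower-cases the text, spells out numbers, currencies and ordinals, and expands abbreviations
print(normalizer("Dr. Smith paid $2.50 for 3 apples on the 2nd of May."))
```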
class ClvpTokenizer(PreTrainedTokenizer):
"""
Construct a CLVP tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import ClvpTokenizer
>>> tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
>>> tokenizer("Hello world")["input_ids"]
[62, 84, 28, 2, 179, 79]
>>> tokenizer(" Hello world")["input_ids"]
[2, 62, 84, 28, 2, 179, 79]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[STOP]"`):
The end of sequence token.
pad_token (`str`, *optional*, defaults to `"[STOP]"`):
The pad token of the sequence.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows the leading word to be treated just like
            any other word. (The CLVP tokenizer detects the beginning of words by the preceding space.)
add_bos_token (`bool`, *optional*, defaults to `False`):
Whether to add `bos_token` in front of the sequence when add_special_tokens=True.
add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether to add `eos_token` at the end of the sequence when `add_special_tokens=True`.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = [
"input_ids",
"attention_mask",
]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
unk_token="[UNK]",
bos_token="<|endoftext|>",
eos_token="[STOP]",
pad_token="[STOP]",
add_prefix_space=False,
add_bos_token=False,
add_eos_token=False,
**kwargs,
):
bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
self.add_bos_token = add_bos_token
self.add_eos_token = add_eos_token
self._normalizer = None
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
bpe_merges = merges_handle.read().split("\n")[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
super().__init__(
errors=errors,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
add_prefix_space=add_prefix_space,
add_bos_token=add_bos_token,
add_eos_token=add_eos_token,
**kwargs,
)
@property
def vocab_size(self):
return len(self.encoder)
@property
def normalizer(self):
if self._normalizer is None:
self._normalizer = EnglishNormalizer()
return self._normalizer
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
# Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
bos_token_id = [self.bos_token_id] if self.add_bos_token else []
eos_token_id = [self.eos_token_id] if self.add_eos_token else []
output = bos_token_id + token_ids_0 + eos_token_id
if token_ids_1 is not None:
output = output + bos_token_id + token_ids_1 + eos_token_id
return output
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_special_tokens_mask
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if not self.add_bos_token:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0))
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
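        # normalize the text first (ascii conversion, lower-casing, number spelling, abbreviation expansion) before byte-level BPE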
text = self.normalizer(text)
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
# if the token is "Ġ" we replace it with "[SPACE]" (if "[SPACE]" is present in the vocab), otherwise we keep the "Ġ".
bpe_tokens.extend(
"[SPACE]" if bpe_token == "\u0120" and "[SPACE]" in self.encoder.keys() else bpe_token
for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def clean_up_tokenization(self, text):
text = "".join(text)
vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys())
text = text.replace("[SPACE]", " ") if "[SPACE]" in vocab_tokens else text
text = text.replace("[STOP]", " ") if "[STOP]" in vocab_tokens else text
        text = text.replace(self.unk_token, "").replace("   ", " ").replace("  ", " ")
return text
# Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
| class_definition | 2,457 | 14,799 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/tokenization_clvp.py | null | 3,407 |
class ClvpFeatureExtractor(SequenceFeatureExtractor):
r"""
Constructs a CLVP feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
This class extracts log-mel-spectrogram features from raw speech using a custom numpy implementation of the `Short
Time Fourier Transform` which should match pytorch's `torch.stft` equivalent.
Args:
feature_size (`int`, *optional*, defaults to 80):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 22050):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
default_audio_length (`int`, *optional*, defaults to 6):
The default length of raw audio in seconds. If `max_length` is not set during `__call__` then it will
automatically be set to default_audio_length * `self.sampling_rate`.
hop_length (`int`, *optional*, defaults to 256):
            Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
chunk_length (`int`, *optional*, defaults to 30):
            The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio
sequences.
n_fft (`int`, *optional*, defaults to 1024):
Size of the Fourier transform.
padding_value (`float`, *optional*, defaults to 0.0):
Padding value used to pad the audio. Should correspond to silences.
mel_norms (`list` of length `feature_size`, *optional*):
If `mel_norms` is provided then it will be used to normalize the log-mel spectrograms along each
mel-filter.
        return_attention_mask (`bool`, *optional*, defaults to `False`):
            Whether to return the attention mask. If left to the default (`False`), the attention mask is not returned.
[What are attention masks?](../glossary#attention-mask)
"""
model_input_names = ["input_features", "attention_mask"]
def __init__(
self,
feature_size=80,
sampling_rate=22050,
default_audio_length=6,
hop_length=256,
chunk_length=30,
n_fft=1024,
padding_value=0.0,
mel_norms=None,
return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask
**kwargs,
):
super().__init__(
feature_size=feature_size,
sampling_rate=sampling_rate,
padding_value=padding_value,
return_attention_mask=return_attention_mask,
**kwargs,
)
self.n_fft = n_fft
self.hop_length = hop_length
self.chunk_length = chunk_length
self.n_samples = chunk_length * sampling_rate
self.nb_max_frames = self.n_samples // hop_length
self.sampling_rate = sampling_rate
self.default_audio_length = default_audio_length
self.mel_norms = mel_norms
self.mel_filters = mel_filter_bank(
num_frequency_bins=1 + (n_fft // 2),
num_mel_filters=feature_size,
min_frequency=0.0,
max_frequency=8000.0,
sampling_rate=sampling_rate,
norm="slaney",
mel_scale="htk",
)
def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
"""
        This method first computes the log-mel spectrogram of the provided audio and then applies normalization along
        each mel filter bank, if `mel_norms` is provided.
"""
log_spec = spectrogram(
waveform,
window_function(self.n_fft, "hann"),
frame_length=self.n_fft,
hop_length=self.hop_length,
power=2.0,
mel_filters=self.mel_filters,
log_mel=None,
)
log_spec = np.log(np.clip(log_spec, a_min=1e-5, a_max=None))
if self.mel_norms is not None:
log_spec = log_spec / np.array(self.mel_norms)[:, None]
return log_spec
def __call__(
self,
raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
sampling_rate: Optional[int] = None,
truncation: bool = True,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_attention_mask: Optional[bool] = True,
padding: Optional[str] = "max_length",
max_length: Optional[int] = None,
**kwargs,
) -> BatchFeature:
"""
        `ClvpFeatureExtractor` is used to extract various voice-specific properties from a sample voice or `raw_speech`,
        such as the pitch and tone of the voice, speaking speed, and even speaking defects like a lisp or stuttering.
        First the voice is padded or truncated so that it becomes a waveform of `self.default_audio_length` seconds,
        and then the log-mel spectrogram is extracted from it.
Args:
raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
stereo, i.e. single float per timestep.
sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech
                recognition pipeline to work correctly.
            truncation (`bool`, *optional*, defaults to `True`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*, defaults to `True`):
Whether to return the attention mask. If left to the default, it will return the attention mask.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding values / vectors.
max_length (`int`, *optional*):
The maximum input length of the inputs.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug."
)
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
is_batched = is_batched_numpy or (
isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
)
if is_batched:
raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech, np.ndarray):
raw_speech = np.asarray(raw_speech, dtype=np.float32)
elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
raw_speech = [np.asarray([raw_speech]).T]
batched_speech = BatchFeature({"input_features": raw_speech})
max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length
padded_inputs = self.pad(
batched_speech,
padding=padding,
max_length=max_length,
truncation=truncation,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
# make sure list is in array format
input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
input_features = [
self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0]
]
if isinstance(input_features[0], List):
padded_inputs["input_features"] = [np.asarray(feature) for feature in input_features]
else:
padded_inputs["input_features"] = input_features
return padded_inputs.convert_to_tensors(return_tensors)
|
class_definition
| 993 | 10,946 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/feature_extraction_clvp.py
| null | 3,408 |
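A hedged usage sketch of `ClvpFeatureExtractor.__call__`, again assuming the `susnato/clvp_dev` checkpoint from the docstring examples in this file; the silent one-second waveform is only a stand-in for real 22050 Hz mono speech:

```python
import numpy as np

from transformers import ClvpFeatureExtractor

feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")

# one second of silence as a stand-in for real speech, sampled at the expected 22050 Hz
raw_speech = np.zeros(22050, dtype=np.float32)

inputs = feature_extractor(raw_speech, sampling_rate=22050, return_tensors="pt")
print(inputs["input_features"].shape)  # (batch_size, feature_size, num_frames)
```

The audio is padded (or truncated) to `default_audio_length * sampling_rate` samples before the log-mel spectrogram is computed, so the number of frames is fixed for a given configuration.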
class ClvpEncoderOutput(ModelOutput):
"""
Base class for CLVP encoder's outputs that contains a pooling of the last hidden states as well as a projection
output (a linear layer on top of the pooled output).
Args:
embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
The embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
The hidden state of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Pooled output of the `last_hidden_state`.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
class_definition
| 6,066 | 8,026 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,409 |
class ClvpOutput(ModelOutput):
"""
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for speech-text similarity.
speech_ids (`torch.LongTensor`, *optional*):
speech_ids (or speech candidates) generated by the `ClvpForCausalLM` model.
logits_per_speech (`torch.FloatTensor` of shape `(speech_batch_size, text_batch_size)`):
The scaled dot product scores between `speech_embeds` and `text_embeds`. This represents the speech-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, speech_batch_size)`):
The scaled dot product scores between `text_embeds` and `speech_embeds`. This represents the text-speech
similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output of the text encoder
model.
        speech_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The speech embeddings obtained by applying the projection layer to the pooled output of the speech encoder
model.
text_model_output (`BaseModelOutputWithPooling`):
The pooled output of the `last_hidden_state` of the text encoder Model.
speech_model_output (`BaseModelOutputWithPooling`):
The pooled output of the `last_hidden_state` of the speech encoder Model.
decoder_hidden_states (`torch.FloatTensor`, *optional*):
The hidden states of the decoder model.
text_encoder_hidden_states (`torch.FloatTensor`, *optional*):
The hidden states of the text encoder model.
speech_encoder_hidden_states (`torch.FloatTensor`, *optional*):
The hidden states of the speech encoder model.
"""
loss: Optional[torch.FloatTensor] = None
speech_ids: Optional[torch.LongTensor] = None
logits_per_speech: torch.FloatTensor = None
logits_per_text: torch.FloatTensor = None
text_embeds: torch.FloatTensor = None
speech_embeds: torch.FloatTensor = None
text_model_output: BaseModelOutputWithPooling = None
speech_model_output: BaseModelOutputWithPooling = None
decoder_hidden_states: torch.FloatTensor = None
text_encoder_hidden_states: torch.FloatTensor = None
speech_encoder_hidden_states: torch.FloatTensor = None
|
class_definition
| 8,040 | 10,538 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,410 |
class ClvpRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
ClvpRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
class_definition
| 10,626 | 11,344 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,411 |
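A minimal standalone sketch (plain PyTorch, no model weights) of the RMS normalization that `ClvpRMSNorm` applies: each hidden vector is rescaled by its root-mean-square without subtracting the mean, and the learned weight starts at ones:

```python
import torch

torch.manual_seed(0)
hidden_states = torch.randn(2, 4, 8)  # (batch, seq_len, hidden_size)
eps = 1e-6
weight = torch.ones(8)  # the module's weight parameter is initialized to ones

variance = hidden_states.pow(2).mean(-1, keepdim=True)
output = weight * (hidden_states * torch.rsqrt(variance + eps))

# after normalization the root-mean-square of every hidden vector is (approximately) 1
print(output.pow(2).mean(-1).sqrt())
```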
class ClvpRotaryPositionalEmbedding(nn.Module):
"""
Rotary Position Embedding Class for CLVP. It was proposed in the paper 'ROFORMER: ENHANCED TRANSFORMER WITH ROTARY
    POSITION EMBEDDING'. Please see https://arxiv.org/pdf/2104.09864v1.pdf.
"""
def __init__(self, config):
super().__init__()
dim = max(config.projection_dim // (config.num_attention_heads * 2), 32)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self.cached_sequence_length = None
self.cached_rotary_positional_embedding = None
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
sequence_length = hidden_states.shape[1]
if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
return self.cached_rotary_positional_embedding
self.cached_sequence_length = sequence_length
time_stamps = torch.arange(sequence_length, device=hidden_states.device).type_as(self.inv_freq)
freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
embeddings = torch.cat((freqs, freqs), dim=-1)
self.cached_rotary_positional_embedding = embeddings.unsqueeze(0)
return self.cached_rotary_positional_embedding
|
class_definition
| 11,347 | 12,707 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,412 |
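A hedged sketch of how the cached cos/sin table produced by `ClvpRotaryPositionalEmbedding` is typically consumed, using the standard "rotate half" formulation from the RoFormer paper; the exact `apply_rotary_pos_emb` helper in the modeling file may differ in details such as position indexing:

```python
import torch

def rotate_half(x):
    # split the rotary dimension in two halves and rotate them into each other
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

seq_len, rotary_dim = 5, 32
inv_freq = 1.0 / (10000 ** (torch.arange(0, rotary_dim, 2).float() / rotary_dim))
freqs = torch.einsum("i,j->ij", torch.arange(seq_len).float(), inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)  # (seq_len, rotary_dim), matching the cached table above
cos, sin = emb.cos(), emb.sin()

query = torch.randn(1, 4, seq_len, rotary_dim)  # (batch, num_heads, seq_len, rotary_dim)
query_rotated = query * cos + rotate_half(query) * sin
print(query_rotated.shape)  # torch.Size([1, 4, 5, 32])
```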
class ClvpSelfAttention(nn.Module):
"""
Multi-headed attention to combine Absolute and Rotary Positional Embeddings into a single Attention module.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
if hasattr(config, "max_position_embeddings"):
max_positions = config.max_position_embeddings
bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool))
bias = bias.view(1, 1, max_positions, max_positions)
self.register_buffer("bias", bias, persistent=False)
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
# Copied from transformers.models.clip.modeling_clip.CLIPAttention._shape
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.FloatTensor,
rotary_pos_emb: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
use_cache: Optional[bool] = False,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
# Raise error when position_ids is None but rotary_pos_emb is provided, because we need that when applying
# rotary_pos_emb to query and key states.
if rotary_pos_emb is not None and position_ids is None:
raise ValueError("`position_ids` must be provided when `rotary_pos_emb` is not None.")
bsz, _, embed_dim = hidden_states.size()
# get query proj
query_states = self._shape(self.q_proj(hidden_states), -1, bsz) * self.scale
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if past_key_value is not None:
past_key, past_value = past_key_value
key_states = torch.cat((past_key, key_states), dim=-2)
value_states = torch.cat((past_value, value_states), dim=-2)
if use_cache is True:
present = (key_states, value_states)
else:
present = None
if rotary_pos_emb is not None:
rotary_emb_dim = rotary_pos_emb.shape[-1]
# Partial rotary embedding
query_rot, query_pass = (
query_states[..., :rotary_emb_dim],
query_states[..., rotary_emb_dim:],
)
key_rot, key_pass = (
key_states[..., :rotary_emb_dim],
key_states[..., rotary_emb_dim:],
)
value_rot, value_pass = (
value_states[..., :rotary_emb_dim],
value_states[..., rotary_emb_dim:],
)
cos, sin = rotary_pos_emb.cos().squeeze(0), rotary_pos_emb.sin().squeeze(0)
query_rot, key_rot, value_rot = apply_rotary_pos_emb(query_rot, key_rot, value_rot, cos, sin, position_ids)
# [batch_size, num_heads, seq_length, head_dim]
query_states = torch.cat((query_rot, query_pass), dim=-1)
key_states = torch.cat((key_rot, key_pass), dim=-1)
value_states = torch.cat((value_rot, value_pass), dim=-1)
tgt_len = query_states.shape[2]
src_len = key_states.shape[2]
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3))
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_probs, value_states)
if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, present, attn_weights
|
class_definition
| 12,710 | 18,428 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,413 |
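A small standalone sketch of the additive 4D attention mask that `ClvpSelfAttention.forward` expects, of shape `(batch, 1, tgt_len, src_len)` with large negative values at masked positions. It mirrors what `_prepare_4d_attention_mask` does in the encoder further below, but is written out by hand here for illustration:

```python
import torch

padding_mask = torch.tensor([[1, 1, 1, 0]])  # (batch, seq_len); 0 marks a padded position
dtype = torch.float32
min_value = torch.finfo(dtype).min

# expand to (batch, 1, tgt_len, src_len) and turn padded positions into large negatives
expanded = padding_mask[:, None, None, :].to(dtype).expand(-1, 1, padding_mask.shape[-1], -1)
additive_mask = (1.0 - expanded) * min_value

attention_scores = torch.randn(1, 1, 4, 4) + additive_mask
attention_probs = attention_scores.softmax(dim=-1)
print(attention_probs[0, 0, 0])  # the last (padded) column receives ~0 probability
```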
class ClvpGatedLinearUnit(nn.Module):
"""
    `ClvpGatedLinearUnit` uses the second half of the `hidden_states` to act as a gate for the first half of the
    `hidden_states`, which controls the flow of data from the first half of the tensor.
"""
def __init__(self, config):
super().__init__()
self.activation_fn = ACT2FN[config.hidden_act]
self.proj = nn.Linear(config.hidden_size, config.intermediate_size * 2)
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
return hidden_states * self.activation_fn(gate)
|
class_definition
| 18,431 | 19,081 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,414 |
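A minimal standalone sketch of the gating performed by `ClvpGatedLinearUnit`: a single linear projection produces twice the intermediate size, and the second half gates the first half through the activation (SiLU is used here only as a stand-in for `ACT2FN[config.hidden_act]`):

```python
import torch
from torch import nn

hidden_size, intermediate_size = 8, 16
proj = nn.Linear(hidden_size, intermediate_size * 2)
activation = nn.SiLU()  # stand-in for the configured activation

hidden_states = torch.randn(2, 5, hidden_size)
value, gate = proj(hidden_states).chunk(2, dim=-1)  # split the projection into value and gate halves
output = value * activation(gate)
print(output.shape)  # torch.Size([2, 5, 16])
```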
class ClvpEncoderMLP(nn.Module):
"""
This MLP is used in CLVP speech or text encoder models.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.fc1 = ClvpGatedLinearUnit(config)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout_layer = nn.Dropout(config.dropout)
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.dropout_layer(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class_definition
| 19,084 | 19,722 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,415 |
class ClvpEncoderLayer(nn.Module):
def __init__(self, config: ClvpConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.self_attn = ClvpSelfAttention(config)
self.mlp = ClvpEncoderMLP(config)
self.input_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
self.post_attention_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.FloatTensor,
rotary_pos_emb: torch.FloatTensor,
attention_mask: torch.LongTensor,
position_ids: torch.LongTensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
input to the layer.
rotary_pos_emb (`torch.FloatTensor`):
rotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module.
attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`):
attention mask where padding elements are indicated by very large negative values.
position_ids (`torch.LongTensor`):
Denotes position ids of the input tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.input_rmsnorm(hidden_states)
attention_outputs = self.self_attn(
hidden_states=hidden_states,
rotary_pos_emb=rotary_pos_emb,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
)
hidden_states = attention_outputs[0]
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_rmsnorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attention_outputs[-1],)
return outputs
|
class_definition
| 19,725 | 22,091 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,416 |
class ClvpDecoderMLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class_definition
| 22,181 | 22,879 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,417 |
class ClvpDecoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = ClvpSelfAttention(config)
self.post_attention_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = ClvpDecoderMLP(inner_dim, config)
def forward(
self,
hidden_states: Optional[Tuple[torch.FloatTensor]],
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
attn_outputs = self.attn(
hidden_states,
past_key_value=past_key_value,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0]
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + residual
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
# residual connection
hidden_states = residual + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs
|
class_definition
| 22,882 | 24,878 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,418 |
class ClvpConditioningEncoder(nn.Module):
"""
    This class processes the log-mel spectrograms (extracted by the Feature Extractor) and text tokens (produced by
    the tokenizer) as inputs for the decoder model.
    First, each log-mel spectrogram is processed into a single vector which captures valuable characteristics from it,
    then the text tokens are converted into token embeddings and position embeddings are added afterwards.
    Both of these vectors are concatenated and then passed to the decoder model.
    The text tokens help to incorporate the "text information" and the log-mel spectrogram is used to specify the
    "voice characteristics" in the generated mel tokens.
"""
def __init__(self, config: ClvpConfig):
super().__init__()
self.text_config = config.text_config
self.decoder_config = config.decoder_config
self.text_token_embedding = nn.Embedding(self.text_config.vocab_size, self.decoder_config.hidden_size)
self.text_position_embedding = nn.Embedding(
self.decoder_config.max_text_tokens, self.decoder_config.hidden_size
)
self.mel_conv = nn.Conv1d(self.decoder_config.feature_size, self.decoder_config.hidden_size, kernel_size=1)
# define group norms to be used before each attention layer
num_groups = self.compute_groupnorm_groups(self.decoder_config.hidden_size)
self.group_norms = nn.ModuleList(
[
nn.GroupNorm(num_groups, self.decoder_config.hidden_size, eps=1e-5, affine=True)
for _ in range(self.decoder_config.num_mel_attn_blocks)
]
)
# define the attention layers
self.mel_attn_blocks = nn.ModuleList(
[ClvpSelfAttention(self.decoder_config) for _ in range(self.decoder_config.num_mel_attn_blocks)]
)
self.gradient_checkpointing = False
def compute_groupnorm_groups(self, channels: int, groups: int = 32):
"""
Calculates the value of `num_groups` for nn.GroupNorm. This logic is taken from the official tortoise
        repository, see:
https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/models/arch_util.py#L26
"""
if channels <= 16:
groups = 8
elif channels <= 64:
groups = 16
while channels % groups != 0:
groups = int(groups / 2)
if groups <= 2:
raise ValueError(
f"Number of groups for the GroupNorm must be greater than 2, but it is {groups}."
f"Please consider using a different `hidden_size`"
)
return groups
def forward(
self,
input_features: torch.FloatTensor,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
# process text
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.size()
elif inputs_embeds is not None:
batch_size, seq_length = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
# construct attention mask if not given
if attention_mask is None:
attention_mask = torch.ones([batch_size, seq_length], dtype=torch.long, device=input_ids.device)
# We add bos and eos input_ids in the modeling file instead of the tokenizer file to keep the logic simple
# This logic is specific to ClvpConditioningEncoder and not used by other modules.
input_ids, attention_mask = _pad_extra_bos_eos_tokens(
input_ids,
attention_mask,
bos_token_id=self.text_config.bos_token_id,
eos_token_id=self.text_config.eos_token_id,
)
inputs_embeds = self.text_token_embedding(input_ids)
position_ids = attention_mask.cumsum(-1) - 1
position_embeds = self.text_position_embedding(position_ids)
text_embeds = inputs_embeds + position_embeds
if self.gradient_checkpointing and self.training:
# process each log-mel spectrogram into a single vector
mel_spec = torch.utils.checkpoint.checkpoint(self.mel_conv, input_features)
for i, mel_attn_block in enumerate(self.mel_attn_blocks):
residual_mel_spec = mel_spec.transpose(1, 2)
mel_spec = torch.utils.checkpoint.checkpoint(self.group_norms[i], mel_spec).transpose(1, 2)
mel_spec = torch.utils.checkpoint.checkpoint(mel_attn_block, mel_spec)[0] + residual_mel_spec
mel_spec = mel_spec.transpose(1, 2)
else:
# process each log-mel spectrogram into a single vector
mel_spec = self.mel_conv(input_features)
for i, mel_attn_block in enumerate(self.mel_attn_blocks):
residual_mel_spec = mel_spec.transpose(1, 2)
mel_spec = self.group_norms[i](mel_spec).transpose(1, 2)
mel_spec = mel_attn_block(mel_spec)[0] + residual_mel_spec
mel_spec = mel_spec.transpose(1, 2)
mel_spec = mel_spec[:, :, 0]
mel_spec = mel_spec.unsqueeze(1)
# repeat if there is either (1 text vs N audios) or (N texts vs 1 audio)
if text_embeds.shape[0] == 1 and mel_spec.shape[0] != 1:
text_embeds = text_embeds.repeat(mel_spec.shape[0], 1, 1)
elif text_embeds.shape[0] != 1 and mel_spec.shape[0] == 1:
mel_spec = mel_spec.repeat(text_embeds.shape[0], 1, 1)
        # If there are N texts and M audios (N != M, both > 1), raise an error since the number of texts and audios
        # must be the same.
        elif text_embeds.shape[0] != mel_spec.shape[0]:
            raise ValueError(
                f"The number of texts and the number of audios must be the same. "
                f"Found {text_embeds.shape[0]} texts vs {mel_spec.shape[0]} audios"
)
return torch.concat([mel_spec, text_embeds], dim=1)
|
class_definition
| 24,881 | 31,146 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,419 |
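A standalone restatement of the group-count rule used by `ClvpConditioningEncoder.compute_groupnorm_groups`, with a few worked values; the function body mirrors the method above so the example runs without instantiating the model:

```python
def compute_groupnorm_groups(channels: int, groups: int = 32) -> int:
    # start from 32 (or 8/16 for small channel counts) and halve until it divides the channel count
    if channels <= 16:
        groups = 8
    elif channels <= 64:
        groups = 16
    while channels % groups != 0:
        groups = int(groups / 2)
    if groups <= 2:
        raise ValueError(f"Number of groups for the GroupNorm must be greater than 2, but it is {groups}.")
    return groups

for channels in (24, 80, 256, 1024):
    print(channels, compute_groupnorm_groups(channels))  # -> 8, 16, 32, 32 respectively
```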
class ClvpPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ClvpConfig
base_model_prefix = "clvp"
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=factor * 0.02)
elif isinstance(module, (nn.Linear, Conv1D, nn.Conv1d)):
module.weight.data.normal_(mean=0.0, std=factor * 0.02)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, ClvpEncoderMLP):
factor = self.config.initializer_factor
in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
nn.init.normal_(module.fc1.proj.weight if getattr(module.fc1, "proj") else module.fc1.weight, std=fc_std)
nn.init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, ClvpEncoder):
config = self.config.get_text_config()
factor = config.initializer_factor
module.projection.weight.data.normal_(mean=0.0, std=factor * (config.hidden_size**-0.5))
elif isinstance(module, ClvpConditioningEncoder):
module.mel_conv.weight.data.normal_(mean=0.0, std=factor)
module.mel_conv.bias.data.zero_()
elif isinstance(module, ClvpForCausalLM):
for name, p in module.named_parameters():
if name == "c_proj.weight":
p.data.normal_(
mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers))
)
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
class_definition
| 31,149 | 33,282 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,420 |
class ClvpEncoder(ClvpPreTrainedModel):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`ClvpEncoderLayer`].
Args:
config: ClvpConfig
"""
def __init__(self, config: ClvpConfig):
super().__init__(config)
self.config = config
self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
self.rotary_pos_emb = ClvpRotaryPositionalEmbedding(config) if config.use_rotary_embedding else None
self.layers = nn.ModuleList([ClvpEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.sequence_summary = SequenceSummary(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self):
return self.token_embedding
def set_input_embeddings(self, value):
self.token_embedding = value
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
input embeddings for the model. This bypasses the model's internal embedding lookup matrix.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor`, *optional*):
Denotes the position ids of `input_ids`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
inputs_embeds = self.token_embedding(input_ids)
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
# expand attention_mask and create position_ids if needed
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(input_shape[1], dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
rotary_pos_emb = self.rotary_pos_emb(inputs_embeds) if self.rotary_pos_emb is not None else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = torch.utils.checkpoint.checkpoint(
encoder_layer.__call__,
hidden_states,
rotary_pos_emb,
attention_mask,
position_ids,
)
else:
layer_outputs = encoder_layer(
hidden_states,
rotary_pos_emb,
attention_mask,
position_ids,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
last_hidden_state = hidden_states
last_hidden_state = self.final_layer_norm(last_hidden_state)
# take the mean over axis 1 and get pooled output
pooled_output = self.sequence_summary(last_hidden_state)
# apply the projection layer
embeds = self.projection(pooled_output)
if not return_dict:
return tuple(
v for v in [embeds, last_hidden_state, pooled_output, encoder_states, all_attentions] if v is not None
)
return ClvpEncoderOutput(
embeds=embeds,
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_states,
attentions=all_attentions,
)
|
class_definition
| 39,973 | 46,810 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,421 |
class ClvpDecoder(ClvpPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ClvpDecoderLayer`]
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.input_embeds_layer = nn.Embedding(self.config.vocab_size, self.config.hidden_size)
self.position_embeds_layer = nn.Embedding(self.config.max_position_embeddings, self.config.hidden_size)
self.drop = nn.Dropout(self.config.embd_pdrop)
self.layers = nn.ModuleList([ClvpDecoderLayer(self.config) for _ in range(self.config.num_hidden_layers)])
self.layer_norm = nn.LayerNorm(self.config.hidden_size, eps=self.config.layer_norm_epsilon)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.input_embeds_layer
def set_input_embeddings(self, new_embeddings):
self.input_embeds_layer = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.layers[layer].attn.prune_heads(heads)
@add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_key_values_length = 0
past_key_values = tuple([None] * len(self.layers))
else:
past_key_values_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(
past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
if inputs_embeds is None:
inputs_embeds = self.input_embeds_layer(input_ids)
position_embeds = self.position_embeds_layer(position_ids)
inputs_embeds = inputs_embeds + position_embeds
attention_mask = _prepare_4d_causal_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x num_attention_heads x N x N
# head_mask has shape num_hidden_layers x batch x num_attention_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
hidden_states = inputs_embeds
if token_type_ids is not None:
token_type_embeds = self.input_embeds_layer(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, (block, past_key_value) in enumerate(zip(self.layers, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
outputs = torch.utils.checkpoint.checkpoint(
block.__call__,
hidden_states,
None,
attention_mask,
position_ids,
head_mask[i],
)
else:
outputs = block(
hidden_states,
past_key_value=past_key_value,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.view(output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
|
class_definition
| 46,813 | 54,421 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,422 |
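A minimal sketch of how `ClvpDecoder.forward` derives `position_ids` when a key/value cache is present: positions for the newly fed tokens continue counting from the cached length:

```python
import torch

past_key_values_length = 7  # number of tokens already stored in the key/value cache
input_length = 3            # tokens fed in the current forward pass

position_ids = torch.arange(
    past_key_values_length, input_length + past_key_values_length, dtype=torch.long
)
position_ids = position_ids.unsqueeze(0).view(-1, input_length)
print(position_ids)  # tensor([[7, 8, 9]])
```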
class ClvpModel(ClvpPreTrainedModel):
def __init__(self, config: ClvpDecoderConfig):
super().__init__(config)
self.config = config
self.decoder = ClvpDecoder(self.config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.input_embeds_layer
def set_input_embeddings(self, value):
self.decoder.input_embeds_layer = value
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
)
|
class_definition
| 54,573 | 57,323 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,423 |
class ClvpForCausalLM(ClvpPreTrainedModel, GenerationMixin):
def __init__(self, config):
super().__init__(config)
self.config = config
self.model = ClvpModel(self.config)
self.final_norm = nn.LayerNorm(self.config.hidden_size)
self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.input_embeds_layer
def set_input_embeddings(self, new_embeddings):
self.model.decoder.input_embeds_layer = new_embeddings
def _prepare_model_inputs(
self,
inputs: Optional[torch.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]:
"""
This function extracts the model-specific `inputs` for generation.
"""
input_name = self.main_input_name
model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None}
inputs_kwarg = model_kwargs.pop(input_name, None)
if inputs_kwarg is not None and inputs is not None:
raise ValueError(
f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed."
f"Make sure to either pass {inputs} or {input_name}=..."
)
elif inputs_kwarg is not None:
inputs = inputs_kwarg
if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
inputs, bos_token_id, model_kwargs=model_kwargs
)
inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
        # Check whether `conditioning_embeds` are provided; if so, concatenate the `bos_token_id` embedding at the end
        # of the `conditioning_embeds`. We then subtract the position embeddings, because they will be added again
        # during the forward pass, so we have to cancel them out here.
conditioning_embeds = model_kwargs.get("conditioning_embeds", None)
if conditioning_embeds is not None:
mel_start_token_embedding = self.model.decoder.input_embeds_layer(
torch.full(
(conditioning_embeds.shape[0], 1),
fill_value=self.config.bos_token_id,
device=conditioning_embeds.device,
)
)
mel_start_token_embedding += self.model.decoder.position_embeds_layer(
torch.full((conditioning_embeds.shape[0], 1), fill_value=0, device=conditioning_embeds.device)
)
conditioning_embeds = torch.concat([conditioning_embeds, mel_start_token_embedding], dim=1)
# subtract the positional_ids here
            if "attention_mask" in model_kwargs:
                position_ids = model_kwargs["attention_mask"].long().cumsum(-1) - 1
            else:
                position_ids = torch.arange(
                    0, conditioning_embeds.shape[1], dtype=torch.long, device=conditioning_embeds.device
                )
position_ids = position_ids.unsqueeze(0).repeat(conditioning_embeds.shape[0], 1)
model_kwargs["inputs_embeds"] = conditioning_embeds - self.model.decoder.position_embeds_layer(
position_ids
)
model_kwargs["input_ids"] = (
torch.ones((model_kwargs["inputs_embeds"].shape[0], 1), dtype=torch.long, device=self.device)
* self.config.bos_token_id
)
return model_kwargs["inputs_embeds"], "inputs_embeds", model_kwargs
inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
return inputs, input_name, model_kwargs
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, inputs_embeds=None, conditioning_embeds=None, **kwargs
):
# Overwritten: has `conditioning_embeds`-related logic
input_ids_length = input_ids.shape[-1]
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past_key_values:
past_length = past_key_values[0][0].shape[2]
# Some generation methods already pass only the last input ID
if input_ids.shape[1] > past_length:
remove_prefix_length = past_length
else:
# Default to old behavior: keep only final ID
remove_prefix_length = input_ids.shape[1] - 1
input_ids = input_ids[:, remove_prefix_length:]
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
if conditioning_embeds is not None and past_key_values is not None:
position_ids = torch.tensor([input_ids_length], dtype=torch.long, device=input_ids.device)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"token_type_ids": token_type_ids,
}
)
return model_inputs
@add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
lm_logits = self.final_norm(hidden_states)
lm_logits = self.lm_head(lm_logits)
loss = None
if labels is not None:
labels = labels.to(lm_logits.device)
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@staticmethod
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2LMHeadModel._reorder_cache
def _reorder_cache(
past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
[`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past_key_values
)
|
class_definition
| 57,446 | 67,600 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,424 |
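A minimal standalone sketch of the shifted language-modeling loss computed in `ClvpForCausalLM.forward`: logits at position t are scored against the label at position t + 1, and positions labeled `-100` are ignored by `CrossEntropyLoss`:

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, vocab_size = 2, 6, 10
lm_logits = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))
labels[:, :2] = -100  # e.g. mask conditioning/prompt positions out of the loss

# shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss)
```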
class ClvpModelForConditionalGeneration(ClvpPreTrainedModel, GenerationMixin):
config_class = ClvpConfig
def __init__(self, config: ClvpConfig):
super().__init__(config)
if not isinstance(config.text_config, ClvpEncoderConfig):
raise TypeError(
"config.text_config is expected to be of type `ClvpEncoderConfig` but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.speech_config, ClvpEncoderConfig):
raise TypeError(
"config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type"
f" {type(config.speech_config)}."
)
if not isinstance(config.decoder_config, ClvpDecoderConfig):
raise TypeError(
"config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type"
f" {type(config.decoder_config)}."
)
self.conditioning_encoder = ClvpConditioningEncoder(config)
self.speech_decoder_model = ClvpForCausalLM(config.decoder_config)
self.text_encoder_model = ClvpEncoder(config.text_config)
self.speech_encoder_model = ClvpEncoder(config.speech_config)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
# Initialize weights and apply final processing
self.post_init()
# taken from the original repo,
# link : https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/api.py#L117
def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor:
"""
This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the
last few tokens of each sequence.
Args:
speech_ids (`torch.LongTensor`):
This refers to the output of the decoder model.
"""
decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes
speech_ids = speech_ids[:, 1:]
stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0)
speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0])
for i, each_seq_stop_token_index in enumerate(stop_token_indices):
# This means that no stop tokens were found so the sentence was still being generated, in that case we don't need
# to apply any padding so just skip to the next sequence of tokens.
if each_seq_stop_token_index.sum() == 0:
continue
stm = each_seq_stop_token_index.argmax()
speech_ids[i, stm:] = decoder_fixing_codes[0]
if stm - 3 < speech_ids.shape[1]:
speech_ids[i, -3:] = torch.tensor(
[decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long
)
return speech_ids
def get_text_features(
self,
input_ids: Optional[torch.LongTensor] = None,
text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
r"""
        This method can be used to extract text_embeds from a text. The text embeddings are obtained by applying the
        projection layer to the pooled output of the CLVP text encoder model.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
[What are input IDs?](../glossary#input-ids)
text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
inputs_embeds for the text encoder model passed in place of `input_ids`.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Returns:
`torch.FloatTensor` of shape `(batch_size, output_dim)`:
The text embeddings obtained by applying the projection layer to the pooled output of the CLVP Text
Model.
Examples:
```python
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
>>> # Define the Text
>>> text = "This is an example text."
>>> # Define processor and model
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
>>> # Generate processor output and text embeds
>>> processor_output = processor(text=text, return_tensors="pt")
>>> text_embeds = model.get_text_features(input_ids=processor_output["input_ids"])
```
"""
outputs = self.text_encoder_model(
input_ids=input_ids,
inputs_embeds=text_encoder_inputs_embeds,
attention_mask=attention_mask,
)
return outputs[0]
def get_speech_features(
self,
speech_ids: Optional[torch.LongTensor] = None,
input_ids: Optional[torch.LongTensor] = None,
input_features: Optional[torch.FloatTensor] = None,
conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
**kwargs,
) -> torch.FloatTensor:
r"""
        This method can be used to extract speech_embeds. The speech embeddings are obtained by applying the speech
        model on speech_ids. If speech_ids is not present but both input_ids and input_features are given, then the
        decoder model is used to first generate the speech_ids, and the speech model is then applied to them.
Args:
speech_ids (`torch.LongTensor` of shape `(batch_size, num_speech_ids)`, *optional*):
Speech Tokens. Padding will be ignored by default should you provide it. If speech_ids are provided
then input_ids and input_features will be automatically ignored.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Input text Tokens. Processed from the [`ClvpTokenizer`]. If speech_ids is not provided, then input_ids
and input_features will be used.
input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`]. If
speech_ids is not provided, then input_ids and input_features will be used.
conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding speech token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
generation_config (`GenerationConfig`, *optional*):
generation config to control the generation of speech_ids if they are not provided.
Returns:
`torch.FloatTensor` of shape `(batch_size, output_dim)`:
The speech embeddings obtained by applying the projection layer to the pooled output of the CLVP Speech
Model.
Examples:
```python
>>> import datasets
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
>>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
>>> text = "This is an example text."
>>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
>>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()
>>> # Define processor and model
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
>>> # Generate processor output and model output
>>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")
>>> speech_embeds = model.get_speech_features(
... input_ids=processor_output["input_ids"], input_features=processor_output["input_features"]
... )
```
"""
if speech_ids is None:
if (input_ids is None and conditioning_encoder_inputs_embeds is None) or input_features is None:
raise ValueError(
"Either speech_ids or input_ids/conditioning_encoder_inputs_embeds and input_features must be provided."
)
if generation_config is None:
generation_config = self.generation_config
generation_config.update(**kwargs)
conditioning_embeds = self.conditioning_encoder(
input_features=input_features,
input_ids=input_ids,
inputs_embeds=conditioning_encoder_inputs_embeds,
attention_mask=attention_mask,
)
speech_ids = self.speech_decoder_model.generate(
conditioning_embeds=conditioning_embeds,
generation_config=generation_config,
)
speech_ids = self.fix_speech_decoder_output(speech_ids[0])
outputs = self.speech_encoder_model(
input_ids=speech_ids,
attention_mask=attention_mask,
)
return outputs[0]
@add_start_docstrings_to_model_forward(CLVP_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ClvpOutput, config_class=ClvpConfig)
def forward(
self,
input_ids: torch.LongTensor = None,
input_features: torch.FloatTensor = None,
conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
return_loss: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = False,
return_dict: Optional[bool] = None,
) -> Union[Tuple, ClvpOutput]:
r"""
Returns:
Examples:
```python
>>> import datasets
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
>>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
>>> text = "This is an example text."
>>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
>>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()
>>> # Define processor and model
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
>>> # processor outputs and model outputs
>>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")
>>> outputs = model(
... input_ids=processor_output["input_ids"],
... input_features=processor_output["input_features"],
... return_dict=True,
... )
```
"""
# Use CLVP model's config for some fields (if specified) instead of those of speech & text components.
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
conditioning_embeds = self.conditioning_encoder(
input_features=input_features,
input_ids=input_ids,
inputs_embeds=conditioning_encoder_inputs_embeds,
attention_mask=attention_mask,
)
decoder_outputs = self.speech_decoder_model(
inputs_embeds=conditioning_embeds,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
speech_ids = decoder_outputs[0]
# since we will get the embeds of shape `(batch_size, seq_len, embedding_dim)` during the forward pass
        # we must convert them to tokens, to make them compatible with the speech transformer
if speech_ids.ndim == 3:
speech_ids = speech_ids.argmax(2)
speech_ids = self.fix_speech_decoder_output(speech_ids)
speech_outputs = self.speech_encoder_model(
input_ids=speech_ids,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_outputs = self.text_encoder_model(
input_ids=input_ids,
inputs_embeds=text_encoder_inputs_embeds,
attention_mask=attention_mask,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
speech_embeds = speech_outputs[0]
text_embeds = text_outputs[0]
# normalized features
speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
logits_per_speech = logits_per_text.t()
loss = None
if return_loss:
loss = clvp_loss(logits_per_text)
if not return_dict:
output = (
logits_per_speech,
logits_per_text,
text_embeds,
speech_embeds,
text_outputs[2],
speech_outputs[2],
)
if output_hidden_states:
output += (
decoder_outputs[-1],
text_outputs[-1],
speech_outputs[-1],
)
return ((loss,) + output) if loss is not None else output
return ClvpOutput(
loss=loss,
logits_per_speech=logits_per_speech,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
speech_embeds=speech_embeds,
text_model_output=text_outputs[2],
speech_model_output=speech_outputs[2],
decoder_hidden_states=decoder_outputs.hidden_states,
text_encoder_hidden_states=text_outputs.hidden_states,
speech_encoder_hidden_states=speech_outputs.hidden_states,
)
@torch.no_grad()
def generate(
self,
input_ids: torch.LongTensor = None,
input_features: torch.FloatTensor = None,
attention_mask: Optional[torch.LongTensor] = None,
generation_config: Optional[GenerationConfig] = None,
pad_to_max_mel_tokens: Optional[int] = None,
output_hidden_states: Optional[bool] = None,
**kwargs,
):
"""
        Generate method for `ClvpModelForConditionalGeneration`. This method calls the `generate` method of
        `ClvpForCausalLM` and then uses the generated `speech_ids` to compute `text_embeds` and `speech_embeds` using
        `ClvpEncoder`.
Args:
input_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Input text Tokens. Processed from the [`ClvpTokenizer`].
input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`].
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
pad_to_max_mel_tokens (`int`, *optional*):
Pads generated speech_ids to the specified value. This is to implement the same logic from the official
repo, link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
                and to make sure the logits are the same.
                This does not affect generation quality, so using it is not recommended since it is less efficient.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of decoder model, text encoder and speech encoder models.
Returns:
`ClvpOutput` or tuple: A `ClvpOutput` (if `return_dict_in_generate=True` or when
`config.return_dict_in_generate=True`) or a tuple.
"""
        # If the input sequences are longer than (self.config.decoder_config.max_text_tokens - 3) then raise an error,
        # because we need to add 3 tokens (1 bos token and 2 eos tokens) to the input_ids in ClvpConditioningEncoder to
        # properly sample
sequence_length = input_ids.shape[-1]
if sequence_length > (self.config.decoder_config.max_text_tokens - 3):
raise ValueError(
f"Maximum sequence length reached! Found input_ids of length {sequence_length}."
f"Please make sure that the maximum length of input_ids is {self.config.decoder_config.max_text_tokens - 3}"
)
if generation_config is None:
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
generation_config.validate()
self._validate_model_kwargs(model_kwargs.copy())
# pad input_ids as specified in the original repo
# link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L380
input_ids, attention_mask = _pad_extra_bos_eos_tokens(
input_ids,
attention_mask,
add_bos_token=False,
bos_token_id=self.config.text_config.bos_token_id,
eos_token_id=self.config.text_config.eos_token_id,
)
conditioning_embeds = self.conditioning_encoder(
input_features=input_features,
input_ids=input_ids,
attention_mask=attention_mask,
)
decoder_outputs = self.speech_decoder_model.generate(
conditioning_embeds=conditioning_embeds,
generation_config=generation_config,
output_hidden_states=output_hidden_states,
return_dict=generation_config.return_dict_in_generate,
)
if isinstance(decoder_outputs, ModelOutput):
speech_ids = decoder_outputs.sequences
# pad to pad_to_max_mel_tokens if given, to replicate the original repo logic
# link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
if pad_to_max_mel_tokens is not None:
padding_needed = pad_to_max_mel_tokens - speech_ids.shape[-1]
speech_ids = torch.nn.functional.pad(
speech_ids, (0, padding_needed), value=self.generation_config.eos_token_id
)
speech_ids = self.fix_speech_decoder_output(speech_ids)
speech_outputs = self.speech_encoder_model(
input_ids=speech_ids,
output_hidden_states=output_hidden_states,
return_dict=generation_config.return_dict_in_generate,
)
text_outputs = self.text_encoder_model(
input_ids=input_ids,
attention_mask=attention_mask,
output_hidden_states=output_hidden_states,
return_dict=generation_config.return_dict_in_generate,
)
speech_embeds = speech_outputs[0]
text_embeds = text_outputs[0]
# normalized features
speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
logits_per_speech = logits_per_text.t()
if not generation_config.return_dict_in_generate:
output = (
speech_ids,
logits_per_speech,
logits_per_text,
text_embeds,
speech_embeds,
text_outputs[2],
speech_outputs[2],
)
if output_hidden_states:
output += (
decoder_outputs[-1],
text_outputs[-1],
speech_outputs[-1],
)
return output
return ClvpOutput(
speech_ids=speech_ids,
logits_per_speech=logits_per_speech,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
speech_embeds=speech_embeds,
text_model_output=text_outputs[2],
speech_model_output=speech_outputs[2],
decoder_hidden_states=decoder_outputs.hidden_states,
text_encoder_hidden_states=text_outputs.hidden_states,
speech_encoder_hidden_states=speech_outputs.hidden_states,
)
|
class_definition
| 67,914 | 91,303 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/modeling_clvp.py
| null | 3,425 |
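A standalone sketch (plain `torch`, not the library method itself) of the post-processing performed by `fix_speech_decoder_output` above: everything from the first `eos_token_id` onwards is replaced with `decoder_fixing_codes[0]`, and the last three positions are overwritten with `decoder_fixing_codes[1:]`, mirroring the original tortoise-tts logic. The token values below are toy inputs:
```python
import torch

eos_token_id = 8193                       # ClvpDecoderConfig default
decoder_fixing_codes = [83, 45, 45, 248]  # ClvpDecoderConfig default

# toy generated sequence: a leading token, a few mel codes, the eos, then garbage
speech_ids = torch.tensor([[8192, 10, 11, 12, eos_token_id, 7, 7, 7]])
speech_ids = speech_ids[:, 1:]            # drop the first token, as the method does

stop_mask = speech_ids == eos_token_id
speech_ids = speech_ids.masked_fill(stop_mask, decoder_fixing_codes[0])
for i, row_mask in enumerate(stop_mask):
    if row_mask.any():
        stm = int(row_mask.int().argmax())             # first stop-token position
        speech_ids[i, stm:] = decoder_fixing_codes[0]  # blank out everything after it
        speech_ids[i, -3:] = torch.tensor(decoder_fixing_codes[1:])

print(speech_ids)  # tensor([[ 10,  11,  12,  83,  45,  45, 248]])
```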
class ClvpProcessor(ProcessorMixin):
r"""
Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor.
[`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. See the
[`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information.
Args:
feature_extractor (`ClvpFeatureExtractor`):
An instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`ClvpTokenizer`):
An instance of [`ClvpTokenizer`]. The tokenizer is a required input.
"""
feature_extractor_class = "ClvpFeatureExtractor"
tokenizer_class = "ClvpTokenizer"
model_input_names = [
"input_ids",
"input_features",
"attention_mask",
]
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
def __call__(self, *args, **kwargs):
"""
        Forwards the `raw_speech` and `sampling_rate` arguments to [`~ClvpFeatureExtractor.__call__`] and the `text`
        argument to [`~ClvpTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
        information.
"""
raw_speech = kwargs.pop("raw_speech", None)
sampling_rate = kwargs.pop("sampling_rate", None)
text = kwargs.pop("text", None)
if raw_speech is None and text is None:
raise ValueError("You need to specify either an `raw_speech` or `text` input to process.")
if raw_speech is not None:
inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs)
if text is not None:
encodings = self.tokenizer(text, **kwargs)
if text is None:
return inputs
elif raw_speech is None:
return encodings
else:
inputs["input_ids"] = encodings["input_ids"]
inputs["attention_mask"] = encodings["attention_mask"]
return inputs
# Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.batch_decode with Whisper->Clvp
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
# Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.decode with Whisper->Clvp
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
|
class_definition
| 689 | 3,603 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/processing_clvp.py
| null | 3,426 |
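A hedged usage sketch for `ClvpProcessor.__call__` above; the checkpoint name is the one used in the docstring examples earlier in this file, and the one-second random waveform merely stands in for real audio:
```python
import numpy as np
from transformers import ClvpProcessor

processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")

text = "This is an example text."
raw_speech = np.random.randn(22050).astype(np.float32)  # ~1 second of fake audio at 22.05 kHz

inputs = processor(raw_speech=raw_speech, sampling_rate=22050, text=text, return_tensors="pt")
print(sorted(inputs.keys()))  # typically ['attention_mask', 'input_features', 'input_ids']
```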
class ClvpEncoderConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ClvpEncoder`]. It is used to instantiate a CLVP
text or CLVP speech encoder according to the specified arguments. Instantiating a configuration with the defaults
will yield a similar configuration to that of the encoder of the CLVP
[susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256):
Vocabulary size of the CLVP Encoder model.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
projection_dim (`int`, *optional*, defaults to 768):
Dimensionality of the projection vector.
num_hidden_layers (`int`, *optional*, defaults to 20):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the feed-forward layers in [`ClvpEncoderMLP`].
use_rotary_embedding (`bool`, *optional*, defaults to `True`):
Whether to use rotary_embedding or not.
use_attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in Query, Key and Value layers during self attention.
summary_type (`str`, *optional*, defaults to `"mean"`):
What strategy to use to get pooler_output from the last_hidden_state. `"last"`, `"first"`, `"mean"` and
`"cls_index"` are supported.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
testing).
bos_token_id (`int`, *optional*, defaults to 255):
Beginning of sequence token id.
eos_token_id (`int`, *optional*, defaults to 0):
End of sequence token id.
Example:
```python
>>> from transformers import ClvpEncoderConfig, ClvpEncoder
>>> # Initializing a ClvpEncoderConfig with susnato/clvp_dev style configuration
>>> encoder_configuration = ClvpEncoderConfig()
>>> # Initializing a ClvpEncoder (with random weights) from the susnato/clvp_dev style configuration
>>> model = ClvpEncoder(encoder_configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "clvp_encoder"
base_config_key = ["text_config", "speech_config"]
def __init__(
self,
vocab_size=256,
hidden_size=768,
intermediate_size=1536,
projection_dim=768,
num_hidden_layers=20,
num_attention_heads=12,
hidden_act="gelu",
layer_norm_eps=1e-5,
attention_dropout=0.1,
dropout=0.1,
use_rotary_embedding=True,
use_attention_bias=False,
summary_type="mean",
initializer_factor=1.0,
bos_token_id=255,
eos_token_id=0,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.dropout = dropout
self.use_rotary_embedding = use_rotary_embedding
self.use_attention_bias = use_attention_bias
self.summary_type = summary_type
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@classmethod
def from_pretrained(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], config_type: str = "text_config", **kwargs
) -> "PretrainedConfig":
cls._set_token_in_kwargs(kwargs)
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# make sure to have the config_type be either "text_config" or "speech_config"
# this is to make sure that we can load only text or speech configs from the nested ClvpConfig.
if config_type not in cls.base_config_key:
raise ValueError(
f"We can only load either 'text_config' or 'speech_config' but you are trying to load" f"{config_type}"
)
# get the text config dict if we are loading from ClvpConfig
if config_dict.get("model_type") == "clvp":
config_dict = config_dict[config_type]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
|
class_definition
| 861 | 7,108 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/configuration_clvp.py
| null | 3,427 |
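A short sketch of the `config_type` mechanism in `ClvpEncoderConfig.from_pretrained` above: when pointed at a nested `clvp` checkpoint it pulls out either the `text_config` or the `speech_config` sub-dictionary (checkpoint name assumed from the surrounding examples):
```python
from transformers import ClvpEncoderConfig

text_encoder_config = ClvpEncoderConfig.from_pretrained("susnato/clvp_dev", config_type="text_config")
speech_encoder_config = ClvpEncoderConfig.from_pretrained("susnato/clvp_dev", config_type="speech_config")
print(text_encoder_config.model_type)  # "clvp_encoder"
```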
class ClvpDecoderConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ClvpDecoder`]. It is used to instantiate a CLVP
Decoder Model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Decoder part of the CLVP
[susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
The architecture is similar to GPT2.
Args:
vocab_size (`int`, *optional*, defaults to 8194):
Vocabulary size of the model.
max_position_embeddings (`int`, *optional*, defaults to 608):
The maximum sequence length of mel tokens that this model might ever be used with. Similar to `n_positions`
in `GPT2Config`.
max_text_tokens (`int`, *optional*, defaults to 404):
The maximum sequence length of text tokens that this model might ever be used with. Similar to
`n_positions` in `GPT2Config`.
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the embeddings and hidden states.
num_hidden_layers (`int`, *optional*, defaults to 30):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `hidden_size`.
num_mel_attn_blocks (`int`, *optional*, defaults to 6):
Denotes the number of self attention layers in [`ClvpConditioningEncoder`].
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
summary_type (`string`, *optional*, defaults to `"cls_index"`):
Argument used when doing sequence summary.
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
summary_first_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio to be used after the projection and activation.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
bos_token_id (`int`, *optional*, defaults to 8192):
Beginning of sequence token id, used at the start of the generation.
eos_token_id (`int`, *optional*, defaults to 8193):
End of sequence token id, used in the method
[`ClvpModelForConditionalGeneration.fix_speech_decoder_output()`] to correct decoder outputs.
feature_size (`int`, *optional*, defaults to 80):
The feature dimension of the extracted mel features. This value is used in [`ClvpConditioningEncoder`].
use_attention_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in Query, Key and Value layers during self attention.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
testing).
decoder_fixing_codes (`list`, *optional*, defaults to `[83, 45, 45, 248]`):
These values are used in the method `fix_speech_decoder_output` to fix decoder generated outputs.
Example:
```python
>>> from transformers import ClvpDecoderConfig, ClvpDecoder
>>> # Initializing a ClvpDecoderConfig with susnato/clvp_dev style configuration
>>> decoder_configuration = ClvpDecoderConfig()
>>> # Initializing a ClvpDecoder (with random weights) from the susnato/clvp_dev style configuration
>>> model = ClvpDecoder(decoder_configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "clvp_decoder"
base_config_key = "decoder_config"
def __init__(
self,
vocab_size=8194,
max_position_embeddings=608,
max_text_tokens=404,
hidden_size=1024,
num_hidden_layers=30,
num_attention_heads=16,
n_inner=None,
num_mel_attn_blocks=6,
activation_function="gelu_new",
resid_pdrop=0.1,
embd_pdrop=0.1,
attention_dropout=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
use_cache=True,
bos_token_id=8192,
eos_token_id=8193,
feature_size=80,
use_attention_bias=True,
initializer_factor=1.0,
decoder_fixing_codes=[83, 45, 45, 248],
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.max_text_tokens = max_text_tokens
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.n_inner = n_inner
self.num_mel_attn_blocks = num_mel_attn_blocks
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attention_dropout = attention_dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
self.use_cache = use_cache
self.feature_size = feature_size
self.use_attention_bias = use_attention_bias
self.initializer_factor = initializer_factor
self.decoder_fixing_codes = decoder_fixing_codes
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
|
class_definition
| 7,111 | 15,147 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/configuration_clvp.py
| null | 3,428 |
class ClvpConfig(PretrainedConfig):
r"""
[`ClvpConfig`] is the configuration class to store the configuration of a [`ClvpModelForConditionalGeneration`]. It
is used to instantiate a CLVP model according to the specified arguments, defining the text model, speech model and
decoder model configs. Instantiating a configuration with the defaults will yield a similar configuration to that
of the CLVP [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize the CLVP text encoder.
speech_config (`dict`, *optional*):
Dictionary of configuration options used to initialize CLVP speech encoder.
decoder_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`ClvpDecoderConfig`].
projection_dim (`int`, *optional*, defaults to 768):
Dimensionality of text and speech projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original CLVP implementation.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
testing).
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import ClvpConfig, ClvpModelForConditionalGeneration
>>> # Initializing a ClvpConfig with susnato/clvp_dev style configuration
>>> configuration = ClvpConfig()
>>> # Initializing a ClvpModelForConditionalGeneration (with random weights) from the susnato/clvp_dev style configuration
>>> model = ClvpModelForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a CLVPConfig from a CLVPTextConfig, CLVPSpeechConfig and a CLVPAutoRegressiveConfig
>>> from transformers import ClvpEncoderConfig, ClvpDecoderConfig
>>> # Initializing a CLVP text, CLVP speech and CLVP decoder configuration
>>> config_text = ClvpEncoderConfig()
>>> config_speech = ClvpEncoderConfig()
>>> decoder_config = ClvpDecoderConfig()
>>> config = ClvpConfig.from_sub_model_configs(config_text, config_speech, decoder_config)
```"""
model_type = "clvp"
sub_configs = {
"text_config": ClvpEncoderConfig,
"speech_config": ClvpEncoderConfig,
"decoder_config": ClvpDecoderConfig,
}
def __init__(
self,
text_config=None,
speech_config=None,
decoder_config=None,
projection_dim=768,
logit_scale_init_value=2.6592,
initializer_factor=1.0,
**kwargs,
):
super().__init__(**kwargs)
if text_config is None:
text_config = {}
logger.info("`text_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.")
if speech_config is None:
speech_config = {}
logger.info("`speech_config` is `None`. initializing the `ClvpEncoderConfig` with default values.")
if decoder_config is None:
decoder_config = {}
logger.info("`decoder_config` is `None`. initializing the `ClvpDecoderConfig` with default values.")
self.text_config = ClvpEncoderConfig(**text_config)
self.speech_config = ClvpEncoderConfig(**speech_config)
self.decoder_config = ClvpDecoderConfig(**decoder_config)
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = initializer_factor
@classmethod
def from_sub_model_configs(
cls,
text_config: ClvpEncoderConfig,
speech_config: ClvpEncoderConfig,
decoder_config: ClvpDecoderConfig,
**kwargs,
):
r"""
Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model
configuration and CLVP decoder model configuration.
Args:
text_config (`ClvpEncoderConfig`):
Text model configuration of type [`ClvpEncoderConfig`].
speech_config (`ClvpEncoderConfig`):
Speech model configuration of type [`ClvpEncoderConfig`].
decoder_config (`ClvpDecoderConfig`):
Decoder model configuration of type [`ClvpDecoderConfig`].
Returns:
[`ClvpConfig`]: An instance of a configuration object
"""
return cls(
text_config=text_config.to_dict(),
speech_config=speech_config.to_dict(),
decoder_config=decoder_config.to_dict(),
**kwargs,
)
|
class_definition
| 15,150 | 20,257 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/clvp/configuration_clvp.py
| null | 3,429 |
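Besides `from_sub_model_configs`, the constructor above also accepts plain dictionaries for the nested configs; a hedged sketch with toy values (not the released checkpoint's settings):
```python
from transformers import ClvpConfig

config = ClvpConfig(
    text_config={"hidden_size": 256, "num_hidden_layers": 4},
    speech_config={"hidden_size": 256, "num_hidden_layers": 4},
    decoder_config={"hidden_size": 256, "num_hidden_layers": 2},
)
print(config.text_config.hidden_size)    # 256
print(config.decoder_config.model_type)  # "clvp_decoder"
```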
class M2M100ScaledWordEmbedding(nn.Embedding):
"""
    This module overrides nn.Embedding's forward by multiplying the embeddings by the embedding scale.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.embed_scale = embed_scale
def forward(self, input_ids: torch.Tensor):
return super().forward(input_ids) * self.embed_scale
|
class_definition
| 3,335 | 3,822 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,430 |
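A minimal sketch of the scaled embedding above; with `scale_embedding` enabled in `M2M100Config` the scale is typically `sqrt(d_model)`, so the forward pass is just the usual lookup multiplied by that constant. The sizes below are toy values, and the import path is taken from the filepath shown in the record:
```python
import math

import torch
from transformers.models.m2m_100.modeling_m2m_100 import M2M100ScaledWordEmbedding

# hypothetical toy sizes, not the real checkpoint dimensions
vocab_size, d_model, padding_idx = 10, 4, 1
emb = M2M100ScaledWordEmbedding(vocab_size, d_model, padding_idx, embed_scale=math.sqrt(d_model))

input_ids = torch.tensor([[3, 5, 1]])
scaled = emb(input_ids)
plain = torch.nn.functional.embedding(input_ids, emb.weight, padding_idx=padding_idx)
print(torch.allclose(scaled, plain * math.sqrt(d_model)))  # True
```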
class M2M100SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, "weights"):
# in forward put the weights on the correct dtype and device of the param
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.register_buffer("weights", emb_weights, persistent=False)
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
"""
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(
self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
):
if input_ids is not None:
bsz, seq_len = input_ids.size()
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
input_ids.device
)
else:
bsz, seq_len = inputs_embeds.size()[:-1]
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)
# expand embeddings if needed
max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
|
class_definition
| 3,825 | 7,426 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,431 |
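A small numeric check of the sinusoidal table built by `get_embedding` above (assuming the class is importable from the module path shown in the record): each row is laid out as the sine half followed by the cosine half, and the `padding_idx` row is zeroed.
```python
import torch
from transformers.models.m2m_100.modeling_m2m_100 import M2M100SinusoidalPositionalEmbedding

weights = M2M100SinusoidalPositionalEmbedding.get_embedding(num_embeddings=6, embedding_dim=4, padding_idx=1)
print(weights.shape)                                   # torch.Size([6, 4])
print(weights[1])                                      # tensor([0., 0., 0., 0.])  <- padding row zeroed
print(torch.allclose(weights[0, :2], torch.zeros(2)))  # True: first half of row 0 is sin(0)
print(torch.allclose(weights[0, 2:], torch.ones(2)))   # True: second half of row 0 is cos(0)
```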
class M2M100Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[M2M100Config] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
# `past_key_value[0].shape[2] == key_value_states.shape[1]`
# is checking that the `sequence_length` of the `past_key_value` is the same as
# the provided `key_value_states` to support prefix tuning
if (
is_cross_attention
and past_key_value is not None
and past_key_value[0].shape[2] == key_value_states.shape[1]
):
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
|
class_definition
| 7,514 | 14,908 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,432 |
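A toy forward pass through the eager attention above, showing the shapes of the attention weights and of the key/value cache that the decoder path returns (tiny hypothetical sizes, random weights):
```python
import torch
from transformers.models.m2m_100.modeling_m2m_100 import M2M100Attention

attn = M2M100Attention(embed_dim=8, num_heads=2, dropout=0.0, is_decoder=True)
hidden_states = torch.randn(1, 5, 8)

attn_output, attn_weights, past_key_value = attn(hidden_states, output_attentions=True)
print(attn_output.shape)        # torch.Size([1, 5, 8])
print(attn_weights.shape)       # torch.Size([1, 2, 5, 5])  (bsz, num_heads, tgt_len, src_len)
print(past_key_value[0].shape)  # torch.Size([1, 2, 5, 4])  cached keys: (bsz, num_heads, seq_len, head_dim)
```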
class M2M100FlashAttention2(M2M100Attention):
"""
    M2M100 flash attention module. This module inherits from `M2M100Attention` as the weights of the module stay
    untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
# M2M100FlashAttention2 attention does not support output_attentions
if output_attentions:
raise ValueError("M2M100FlashAttention2 attention does not support output_attentions")
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, q_len, _ = hidden_states.size()
# get query proj
query_states = self._reshape(self.q_proj(hidden_states), -1, bsz)
# get key, value proj
# `past_key_value[0].shape[2] == key_value_states.shape[1]`
# is checking that the `sequence_length` of the `past_key_value` is the same as
# the provided `key_value_states` to support prefix tuning
if (
is_cross_attention
and past_key_value is not None
and past_key_value[0].shape[2] == key_value_states.shape[1]
):
# reuse k,v, cross_attentions
key_states = past_key_value[0].transpose(1, 2)
value_states = past_key_value[1].transpose(1, 2)
elif is_cross_attention:
# cross_attentions
key_states = self._reshape(self.k_proj(key_value_states), -1, bsz)
value_states = self._reshape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1)
value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1)
else:
# self_attention
key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2))
kv_seq_len = key_states.shape[-2]
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
        # In PEFT, we usually cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)
input_dtype = query_states.dtype
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(
f"The input hidden states seems to be silently casted in float32, this might be related to"
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
f" {target_dtype}."
)
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
q_len,
dropout=self.dropout if self.training else 0.0,
is_causal=self.is_causal,
use_top_left_mask=self._flash_attn_uses_top_left_mask,
)
attn_output = attn_output.reshape(bsz, q_len, -1)
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
|
class_definition
| 15,002 | 21,450 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,433 |
class M2M100SdpaAttention(M2M100Attention):
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
if output_attentions or layer_head_mask is not None:
# TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented.
logger.warning_once(
"M2M100Model is using M2M100SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention"
' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
return super().forward(
hidden_states,
key_value_states=key_value_states,
past_key_value=past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states)
# get key, value proj
# `past_key_value[0].shape[2] == key_value_states.shape[1]`
# is checking that the `sequence_length` of the `past_key_value` is the same as
# the provided `key_value_states` to support prefix tuning
if (
is_cross_attention
and past_key_value is not None
and past_key_value[0].shape[2] == key_value_states.shape[1]
):
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
query_states = self._shape(query_states, tgt_len, bsz)
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
# The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1.
is_causal = True if self.is_causal and attention_mask is None and tgt_len > 1 else False
# NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask,
# but we are fine here as `_shape` do call `.contiguous()`. Reference: https://github.com/pytorch/pytorch/issues/112577
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=attention_mask,
dropout_p=self.dropout if self.training else 0.0,
is_causal=is_causal,
)
if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, None, past_key_value
|
class_definition
| 21,542 | 27,327 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,434 |
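The eager, flash-attention-2 and SDPA variants above are picked through the config's `_attn_implementation` flag, which in practice is set when loading the model; a hedged sketch (flash-attention-2 would additionally require the `flash-attn` package and a supported GPU):
```python
from transformers import M2M100ForConditionalGeneration

# SDPA (torch.nn.functional.scaled_dot_product_attention) backend
model_sdpa = M2M100ForConditionalGeneration.from_pretrained(
    "facebook/m2m100_418M", attn_implementation="sdpa"
)

# plain eager attention, e.g. when output_attentions=True is needed
model_eager = M2M100ForConditionalGeneration.from_pretrained(
    "facebook/m2m100_418M", attn_implementation="eager"
)
```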
class M2M100EncoderLayer(nn.Module):
def __init__(self, config: M2M100Config):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = M2M100_ATTENTION_CLASSES[config._attn_implementation](
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
class_definition
| 27,437 | 30,575 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,435 |
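A hypothetical usage sketch for `M2M100EncoderLayer`, assuming the transformers package is installed and importing the class from the module path shown above; the tiny configuration values are made up for illustration and the weights are random, so the output only matters for its shape.
import torch
from transformers import M2M100Config
from transformers.models.m2m_100.modeling_m2m_100 import M2M100EncoderLayer

config = M2M100Config(d_model=64, encoder_ffn_dim=128, encoder_attention_heads=4)
layer = M2M100EncoderLayer(config).eval()

hidden_states = torch.randn(2, 7, config.d_model)  # (batch, seq_len, embed_dim)
with torch.no_grad():
    # Passing None skips padding masking and per-head masking entirely.
    outputs = layer(hidden_states, attention_mask=None, layer_head_mask=None)
print(outputs[0].shape)  # torch.Size([2, 7, 64])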
class M2M100DecoderLayer(nn.Module):
def __init__(self, config: M2M100Config):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = M2M100_ATTENTION_CLASSES[config._attn_implementation](
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
is_causal=True,
config=config,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = M2M100_ATTENTION_CLASSES[config._attn_implementation](
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
|
class_definition
| 30,829 | 36,709 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,436 |
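The cache layout described in the comments above (self-attention key/value states in the first two slots, cross-attention key/value states in the last two) can be pictured with a small, purely illustrative tensor sketch; all shapes are assumptions, not values from a real checkpoint.
import torch

bsz, num_heads, past_len, src_len, head_dim = 2, 4, 3, 6, 8  # illustrative
past_key_value = (
    torch.zeros(bsz, num_heads, past_len, head_dim),  # self-attn keys
    torch.zeros(bsz, num_heads, past_len, head_dim),  # self-attn values
    torch.zeros(bsz, num_heads, src_len, head_dim),   # cross-attn keys
    torch.zeros(bsz, num_heads, src_len, head_dim),   # cross-attn values
)
self_attn_past = past_key_value[:2]    # consumed by self-attention, grows each step
cross_attn_past = past_key_value[-2:]  # consumed by cross-attention, fixed source length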
class M2M100PreTrainedModel(PreTrainedModel):
config_class = M2M100Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["M2M100EncoderLayer", "M2M100DecoderLayer"]
_supports_flash_attn_2 = True
_supports_sdpa = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
class_definition
| 36,712 | 37,462 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,437 |
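`_init_weights` draws every linear and embedding weight from a normal distribution with standard deviation `config.init_std`; a quick, hedged check on a randomly initialized toy model (the small sizes are arbitrary, assumes transformers is installed) makes this visible.
from transformers import M2M100Config, M2M100Model

config = M2M100Config(
    d_model=64, encoder_layers=1, decoder_layers=1,
    encoder_ffn_dim=128, decoder_ffn_dim=128,
    encoder_attention_heads=4, decoder_attention_heads=4, vocab_size=128,
)
model = M2M100Model(config)
# The empirical std of a freshly initialized weight matrix should be close to init_std (0.02).
print(round(model.encoder.layers[0].fc1.weight.std().item(), 3))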
class M2M100Encoder(M2M100PreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`M2M100EncoderLayer`].
Args:
config: M2M100Config
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.embed_tokens = M2M100ScaledWordEmbedding(
config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale
)
if embed_tokens is not None:
self.embed_tokens.weight = embed_tokens.weight
self.embed_positions = M2M100SinusoidalPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList([M2M100EncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
self._use_sdpa = config._attn_implementation == "sdpa"
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
embed_pos = self.embed_positions(input_ids, inputs_embeds)
embed_pos = embed_pos.to(inputs_embeds.device)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
if self._use_flash_attention_2:
attention_mask = attention_mask if 0 in attention_mask else None
elif self._use_sdpa and head_mask is None and not output_attentions:
# output_attentions=True & head_mask can not be supported when using SDPA, fall back to
# the manual implementation that requires a 4D causal mask in all cases.
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
else:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(
f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
encoder_layer.__call__,
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
output_attentions,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
|
class_definition
| 45,044 | 53,896 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,438 |
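A hedged end-to-end sketch of running the encoder on random token ids with a deliberately tiny, made-up configuration (random weights, assumes transformers is installed; the class is imported from the module path shown above).
import torch
from transformers import M2M100Config
from transformers.models.m2m_100.modeling_m2m_100 import M2M100Encoder

config = M2M100Config(
    d_model=64, encoder_layers=2, encoder_ffn_dim=128,
    encoder_attention_heads=4, vocab_size=128,
)
encoder = M2M100Encoder(config).eval()

input_ids = torch.randint(4, config.vocab_size, (2, 9))  # avoid special token ids
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    out = encoder(input_ids=input_ids, attention_mask=attention_mask)
print(out.last_hidden_state.shape)  # torch.Size([2, 9, 64])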
class M2M100Decoder(M2M100PreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`M2M100DecoderLayer`]
Args:
config: M2M100Config
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = M2M100ScaledWordEmbedding(
config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale
)
if embed_tokens is not None:
self.embed_tokens.weight = embed_tokens.weight
self.embed_positions = M2M100SinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList([M2M100DecoderLayer(config) for _ in range(config.decoder_layers)])
self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
self._use_sdpa = config._attn_implementation == "sdpa"
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if self._use_flash_attention_2:
# 2d mask is passed through the layers
combined_attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
elif self._use_sdpa and not output_attentions and cross_attn_head_mask is None:
# output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
# the manual implementation that requires a 4D causal mask in all cases.
combined_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
attention_mask,
input_shape,
inputs_embeds,
past_key_values_length,
)
else:
# 4d mask is passed through the layers
combined_attention_mask = _prepare_4d_causal_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if self._use_flash_attention_2:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self._use_sdpa and cross_attn_head_mask is None and not output_attentions:
# output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
# the manual implementation that requires a 4D causal mask in all cases.
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
encoder_attention_mask,
inputs_embeds.dtype,
tgt_len=input_shape[-1],
)
else:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _prepare_4d_attention_mask(
encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
# embed positions
positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
positions = positions.to(inputs_embeds.device)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting" " `use_cache=False`..."
)
use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
raise ValueError(
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
decoder_layer.__call__,
hidden_states,
combined_attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
output_attentions,
use_cache,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if skip_the_layer:
continue
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
|
class_definition
| 53,899 | 68,563 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,439 |
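A sketch of the incremental decoding pattern the `past_key_values` docstring describes: the full cache comes back from the first pass, and only the newest token id is fed on the next one. Configuration sizes and token ids are illustrative assumptions; the weights are random.
import torch
from transformers import M2M100Config
from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder

config = M2M100Config(
    d_model=64, decoder_layers=2, decoder_ffn_dim=128,
    decoder_attention_heads=4, vocab_size=128,
)
decoder = M2M100Decoder(config).eval()

encoder_hidden_states = torch.randn(1, 6, config.d_model)  # pretend encoder output
with torch.no_grad():
    step = decoder(
        input_ids=torch.tensor([[2]]),  # decoder_start_token_id
        encoder_hidden_states=encoder_hidden_states,
        use_cache=True,
    )
    # Reuse the cache and pass only the newly generated token id.
    next_step = decoder(
        input_ids=torch.tensor([[5]]),
        encoder_hidden_states=encoder_hidden_states,
        past_key_values=step.past_key_values,
        use_cache=True,
    )
print(len(next_step.past_key_values), next_step.past_key_values[0][0].shape)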
class M2M100Model(M2M100PreTrainedModel):
_tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
def __init__(self, config: M2M100Config):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.shared = M2M100ScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale)
self.encoder = M2M100Encoder(config, self.shared)
self.decoder = M2M100Decoder(config, self.shared)
if config._attn_implementation == "flash_attention_2":
logger.warning_once(
"Attention with Flash Attention 2 does not support `layer_head_mask`. If you need this feature, please use standard attention."
)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(M2M_100_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Seq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
|
class_definition
| 68,712 | 74,082 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,440 |
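Because `encoder_outputs` can be passed in directly, the encoder can be run once and its hidden states reused across several decoder passes. A hedged sketch with a tiny random model (all sizes are assumptions, assumes transformers is installed):
import torch
from transformers import M2M100Config, M2M100Model

config = M2M100Config(
    d_model=64, encoder_layers=2, decoder_layers=2,
    encoder_ffn_dim=128, decoder_ffn_dim=128,
    encoder_attention_heads=4, decoder_attention_heads=4, vocab_size=128,
)
model = M2M100Model(config).eval()

input_ids = torch.randint(4, config.vocab_size, (1, 8))
with torch.no_grad():
    encoder_outputs = model.get_encoder()(input_ids=input_ids)
    for decoder_input_ids in (torch.tensor([[2]]), torch.tensor([[2, 7]])):
        out = model(encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids)
        print(out.last_hidden_state.shape)  # (1, 1, 64) then (1, 2, 64)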
class M2M100ForConditionalGeneration(M2M100PreTrainedModel, GenerationMixin):
base_model_prefix = "model"
_tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
def __init__(self, config: M2M100Config):
super().__init__(config)
self.model = M2M100Model(config)
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(M2M_100_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(M2M_100_GENERATION_EXAMPLE)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0])
masked_lm_loss = None
if labels is not None:
# move labels to the correct device to enable PP
labels = labels.to(lm_logits.device)
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
)
return reordered_past
|
class_definition
| 74,220 | 79,144 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
| null | 3,441 |
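When only `labels` are given, the forward pass above builds `decoder_input_ids` with the module-level `shift_tokens_right` helper from the same file; a small sketch with made-up token ids shows the shift.
import torch
from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

labels = torch.tensor([[901, 47, 1538, 2]])  # hypothetical ids ending in eos (2)
decoder_input_ids = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
print(decoder_input_ids)  # tensor([[2, 901, 47, 1538]]): shifted right by one position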
class M2M100Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`M2M100Model`]. It is used to instantiate an
M2M100 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the M2M100
[facebook/m2m100_418M](https://huggingface.co/facebook/m2m100_418M) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
        vocab_size (`int`, *optional*, defaults to 128112):
            Vocabulary size of the M2M100 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`M2M100Model`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.05):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.05):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Example:
```python
>>> from transformers import M2M100Config, M2M100Model
>>> # Initializing a M2M100 facebook/m2m100_418M style configuration
>>> configuration = M2M100Config()
>>> # Initializing a model (with random weights) from the facebook/m2m100_418M style configuration
>>> model = M2M100Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "m2m_100"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=128112,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.05,
decoder_layerdrop=0.05,
use_cache=True,
is_encoder_decoder=True,
activation_function="relu",
d_model=1024,
dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=2,
scale_embedding=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
|
class_definition
| 1,071 | 7,302 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/configuration_m2m_100.py
| null | 3,442 |
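The `attribute_map` above lets generic attribute names resolve to the M2M100-specific ones; a quick check (assumes transformers is installed):
from transformers import M2M100Config

config = M2M100Config(d_model=512, encoder_attention_heads=8)
print(config.hidden_size)          # 512, alias of d_model
print(config.num_attention_heads)  # 8, alias of encoder_attention_heads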
class M2M100OnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
]
)
if self.use_past:
common_inputs["decoder_input_ids"] = {0: "batch"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction="inputs")
return common_inputs
# Copied from BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering
# A better name would be _generate_dummy_inputs_for_encoder_and_decoder because sequence classification and question
# answering are not supported for M2M100, but this name is preserved to be able to check that the copy matches what
# was done for BART so that it can be updated if need be.
def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
self,
tokenizer: PreTrainedTokenizer,
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(
batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(
seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
)
# Generate dummy inputs according to compute batch and sequence
dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
return common_inputs
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._generate_dummy_inputs_for_default_and_seq2seq_lm
def _generate_dummy_inputs_for_default_and_seq2seq_lm(
self,
tokenizer: PreTrainedTokenizer,
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer, batch_size, seq_length, is_pair, framework
)
# Generate decoder inputs
decoder_seq_length = seq_length if not self.use_past else 1
decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer, batch_size, decoder_seq_length, is_pair, framework
)
decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
common_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
batch, encoder_seq_length = common_inputs["input_ids"].shape
decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
encoder_shape = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
decoder_past_length = decoder_seq_length + 3
decoder_shape = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
common_inputs["decoder_attention_mask"] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
)
common_inputs["past_key_values"] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
num_encoder_layers, num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers, num_decoder_layers)
max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(min_num_layers):
common_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape),
torch.zeros(decoder_shape),
torch.zeros(encoder_shape),
torch.zeros(encoder_shape),
)
)
# TODO: test this.
shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(min_num_layers, max_num_layers):
common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
return common_inputs
generate_dummy_inputs = _generate_dummy_inputs_for_default_and_seq2seq_lm
|
class_definition
| 7,305 | 13,361 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/configuration_m2m_100.py
| null | 3,443 |
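A hedged sketch of inspecting the ONNX input axes this config declares, with and without `past_key_values` (assumes the transformers ONNX utilities are available):
from transformers import M2M100Config
from transformers.models.m2m_100.configuration_m2m_100 import M2M100OnnxConfig

config = M2M100Config()
onnx_config = M2M100OnnxConfig(config, task="default")
print(dict(onnx_config.inputs))  # dynamic axes for encoder/decoder inputs, no cache

onnx_config_with_past = M2M100OnnxConfig.with_past(config, task="default")
print(list(onnx_config_with_past.inputs))  # adds past_key_values.* entries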
class M2M100Tokenizer(PreTrainedTokenizer):
"""
Construct an M2M100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
spm_file (`str`):
Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary.
src_lang (`str`, *optional*):
A string representing the source language.
tgt_lang (`str`, *optional*):
A string representing the target language.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
language_codes (`str`, *optional*, defaults to `"m2m100"`):
What language codes to use. Should be one of `"m2m100"` or `"wmt21"`.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
>>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
>>> src_text = " UN Chief Says There Is No Military Solution in Syria"
>>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
>>> outputs = model(**model_inputs) # should work
```"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__(
self,
vocab_file,
spm_file,
src_lang=None,
tgt_lang=None,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
pad_token="<pad>",
unk_token="<unk>",
language_codes="m2m100",
sp_model_kwargs: Optional[Dict[str, Any]] = None,
num_madeup_words=8,
**kwargs,
) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.language_codes = language_codes
fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
additional_special_tokens = kwargs.pop("additional_special_tokens", [])
for lang_code in fairseq_language_code:
token = self.get_lang_token(lang_code)
            # register each language-code token (e.g. "__en__") as an additional special token unless it was already provided
            if token not in additional_special_tokens:
additional_special_tokens.append(token)
self.vocab_file = vocab_file
self.encoder = load_json(vocab_file)
self.decoder = {v: k for k, v in self.encoder.items()}
self.spm_file = spm_file
self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
self.encoder_size = len(self.encoder)
self.lang_token_to_id = {
self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
}
self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
self._src_lang = src_lang if src_lang is not None else "en"
self.tgt_lang = tgt_lang
self.cur_lang_id = self.get_lang_id(self._src_lang)
self.num_madeup_words = num_madeup_words
super().__init__(
src_lang=src_lang,
tgt_lang=tgt_lang,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
unk_token=unk_token,
pad_token=pad_token,
language_codes=language_codes,
sp_model_kwargs=self.sp_model_kwargs,
additional_special_tokens=additional_special_tokens,
num_madeup_words=num_madeup_words,
**kwargs,
)
self.set_src_lang_special_tokens(self._src_lang)
@property
def vocab_size(self) -> int:
return len(self.encoder)
def get_vocab(self) -> Dict:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(token, self.encoder[self.unk_token])
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the decoder."""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens) + token
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens. An M2M100 sequence has the following format, where `X` represents the sequence:
        - `input_ids` (for encoder) `[src_lang_code] X [eos]`
        - `decoder_input_ids`: (for decoder) `[tgt_lang_code] X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def __getstate__(self) -> Dict:
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d: Dict) -> None:
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
save_dir = Path(save_directory)
if not save_dir.is_dir():
raise OSError(f"{save_directory} should be a directory")
vocab_save_path = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
spm_save_path = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder, vocab_save_path)
if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
copyfile(self.spm_file, spm_save_path)
elif not os.path.isfile(self.spm_file):
with open(spm_save_path, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (str(vocab_save_path), str(spm_save_path))
def prepare_seq2seq_batch(
self,
src_texts: List[str],
src_lang: str = "en",
tgt_texts: Optional[List[str]] = None,
tgt_lang: str = "ro",
**kwargs,
) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self.src_lang)
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
tgt_lang_id = self.get_lang_id(tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def _switch_to_input_mode(self):
self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang: str) -> None:
"""Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
lang_token = self.get_lang_token(src_lang)
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
"""Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
lang_token = self.get_lang_token(tgt_lang)
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def get_lang_token(self, lang: str) -> str:
return self.lang_code_to_token[lang]
def get_lang_id(self, lang: str) -> int:
lang_token = self.get_lang_token(lang)
return self.lang_token_to_id[lang_token]
|
class_definition
| 1,885 | 15,897 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/m2m_100/tokenization_m2m_100.py
| null | 3,444 |
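The tokenizer above prefixes the source (or target) language code and appends `eos`, and `_build_translation_inputs` forces the target language id as the first generated token. A minimal usage sketch, assuming the public `facebook/m2m100_418M` checkpoint (the concrete token ids depend on that checkpoint's vocabulary):
from transformers import M2M100Tokenizer

# Hedged sketch: the checkpoint name is an assumption, not part of the code above.
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
encoded = tokenizer("Hello world", return_tensors="pt")
print(encoded["input_ids"])          # laid out as [src_lang_code] X [eos], per set_src_lang_special_tokens
print(tokenizer.get_lang_id("ro"))   # the id that _build_translation_inputs passes as forced_bos_token_id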
class Olmo2RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Olmo2RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
class_definition
| 1,693 | 2,413 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,445 |
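A standalone numerical check of the normalization above, written against plain torch so it does not depend on the modeling file; the hidden size, dtype, and inputs are arbitrary:
import torch

hidden = torch.randn(2, 3, 4, dtype=torch.float16)
weight = torch.ones(4, dtype=torch.float16)
eps = 1e-6
# Same steps as Olmo2RMSNorm.forward: upcast to float32, scale by the inverse RMS, cast back.
upcast = hidden.to(torch.float32)
out = weight * (upcast * torch.rsqrt(upcast.pow(2).mean(-1, keepdim=True) + eps)).to(hidden.dtype)
print(out.dtype)                           # torch.float16, the input dtype is preserved
print(out.float().pow(2).mean(-1).sqrt())  # per-vector RMS close to 1 since weight is all ones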
class Olmo2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim, config.rms_norm_eps)
self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim, config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
logger.warning_once(
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
else:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
class_definition
| 5,692 | 9,577 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,446 |
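The attention module above is grouped-query attention: `num_attention_heads` query heads share `num_key_value_heads` key/value heads, with `num_key_value_groups` queries per group. A small shape sketch with assumed sizes (32 query heads, 8 KV heads, head_dim 128), independent of the model code:
import torch

batch, seq, n_q, n_kv, head_dim = 2, 5, 32, 8, 128
q = torch.randn(batch, seq, n_q * head_dim).view(batch, seq, -1, head_dim).transpose(1, 2)
k = torch.randn(batch, seq, n_kv * head_dim).view(batch, seq, -1, head_dim).transpose(1, 2)
print(q.shape, k.shape)  # torch.Size([2, 32, 5, 128]) torch.Size([2, 8, 5, 128])
# Each group of 32 // 8 = 4 query heads attends against the same key/value head; the attention
# backend (eager/SDPA/flash) broadcasts or repeats the KV heads accordingly.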
class Olmo2MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
|
class_definition
| 9,580 | 10,248 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,447 |
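Olmo2MLP is a gated (SwiGLU-style) feed-forward block. The same computation written out with plain linear layers and the default `silu` activation, using small assumed sizes:
import torch
import torch.nn as nn

hidden, inter = 8, 16
gate = nn.Linear(hidden, inter, bias=False)
up = nn.Linear(hidden, inter, bias=False)
down = nn.Linear(inter, hidden, bias=False)
x = torch.randn(2, 5, hidden)
out = down(nn.functional.silu(gate(x)) * up(x))  # matches down_proj(act_fn(gate_proj(x)) * up_proj(x))
print(out.shape)  # torch.Size([2, 5, 8])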
class Olmo2DecoderLayer(nn.Module):
def __init__(self, config: Olmo2Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx)
self.mlp = Olmo2MLP(config)
self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
**kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
|
class_definition
| 10,251 | 12,311 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,448 |
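Unlike a pre-norm Llama-style block, this layer has no input layernorm: it normalizes each sublayer's output and only then adds the residual. A stripped-down sketch of that control flow, with lambdas standing in for the attention and MLP sublayers and LayerNorm standing in for the RMSNorm used above:
import torch
import torch.nn as nn

hidden_size = 8
attn = lambda x: x   # stand-in for self_attn
mlp = lambda x: x    # stand-in for the feed-forward block
post_attn_norm = nn.LayerNorm(hidden_size)
post_ff_norm = nn.LayerNorm(hidden_size)

x = torch.randn(2, 5, hidden_size)
x = x + post_attn_norm(attn(x))  # norm applied to the attention output, then the residual is added
x = x + post_ff_norm(mlp(x))     # same ordering for the feed-forward sublayer
print(x.shape)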
class Olmo2RotaryEmbedding(nn.Module):
def __init__(self, config: Olmo2Config, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
def _dynamic_frequency_update(self, position_ids, device):
"""
dynamic RoPE layers should recompute `inv_freq` in the following situations:
1 - growing beyond the cached sequence length (allow scaling)
2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
"""
seq_len = torch.max(position_ids) + 1
if seq_len > self.max_seq_len_cached: # growth
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
self.max_seq_len_cached = seq_len
if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
# This .to() is needed if the model has been moved to a device after being initialized (because
# the buffer is automatically moved, but not the original copy)
self.original_inv_freq = self.original_inv_freq.to(device)
self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
self.max_seq_len_cached = self.original_max_seq_len
@torch.no_grad()
def forward(self, x, position_ids):
if "dynamic" in self.rope_type:
self._dynamic_frequency_update(position_ids, device=x.device)
# Core RoPE block
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
# Force float32 (see https://github.com/huggingface/transformers/pull/29285)
device_type = x.device.type
device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos()
sin = emb.sin()
# Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
cos = cos * self.attention_scaling
sin = sin * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
|
class_definition
| 12,314 | 15,509 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,449 |
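For the default (unscaled) `rope_type`, the cos/sin tables built in `forward` reduce to the standard RoPE frequencies. A minimal re-derivation with an assumed head_dim of 8 and base 10000 (attention_scaling is 1.0 in this case):
import torch

head_dim, base, seq = 8, 10000.0, 6
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))  # (head_dim / 2,)
position_ids = torch.arange(seq).float()[None, :]                             # (1, seq)
freqs = position_ids[:, :, None] * inv_freq[None, None, :]                    # (1, seq, head_dim / 2)
emb = torch.cat((freqs, freqs), dim=-1)                                       # (1, seq, head_dim)
cos, sin = emb.cos(), emb.sin()
print(cos.shape, sin.shape)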
class Olmo2PreTrainedModel(PreTrainedModel):
config_class = Olmo2Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Olmo2DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn_2 = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_cache_class = True
_supports_quantized_cache = True
_supports_static_cache = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
class_definition
| 16,531 | 17,454 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,450 |
class Olmo2Model(Olmo2PreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Olmo2DecoderLayer`]
Args:
config: Olmo2Config
"""
def __init__(self, config: Olmo2Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Olmo2RotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
@add_start_docstrings_to_model_forward(OLMO2_INPUTS_DOCSTRING)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**flash_attn_kwargs: Unpack[FlashAttentionKwargs],
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache()
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
decoder_layer.__call__,
hidden_states,
causal_mask,
position_ids,
past_key_values,
output_attentions,
use_cache,
cache_position,
position_embeddings,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**flash_attn_kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
output = BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
return output if return_dict else output.to_tuple()
def _update_causal_mask(
self,
attention_mask: torch.Tensor,
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
is_training=self.training,
):
return None
dtype, device = input_tensor.dtype, input_tensor.device
sequence_length = input_tensor.shape[1]
if using_static_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
device=device,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type == "cuda"
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
device: torch.device,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
                The target length: when generating with a static cache, the mask should be as long as the static cache
                to account for the zero padding (the part of the cache that is not yet filled).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
device (`torch.device`):
                The device to place the 4D attention mask on.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
|
class_definition
| 22,258 | 33,483 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,451 |
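`_prepare_4d_causal_attention_mask_with_cache_position` expands a 2D padding mask into an additive 4D mask: 0 where attention is allowed, the dtype minimum where it is blocked. The same steps on a toy example (batch 1, three positions, the last one padded):
import torch

batch, seq, target = 1, 3, 3
dtype = torch.float32
min_dtype = torch.finfo(dtype).min
attention_mask = torch.tensor([[1, 1, 0]])  # last position is padding
cache_position = torch.arange(seq)
causal = torch.full((seq, target), min_dtype, dtype=dtype)
causal = torch.triu(causal, diagonal=1)
causal *= torch.arange(target) > cache_position.reshape(-1, 1)
causal = causal[None, None, :, :].expand(batch, 1, -1, -1).clone()
padding = (causal[:, :, :, :seq] + attention_mask[:, None, None, :]) == 0
causal[:, :, :, :seq] = causal[:, :, :, :seq].masked_fill(padding, min_dtype)
print(causal)  # each row can attend to earlier non-padded positions only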
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
|
class_definition
| 33,486 | 33,548 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,452 |
class Olmo2ForCausalLM(Olmo2PreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
_tp_plan = {"lm_head": "colwise_rep"}
def __init__(self, config):
super().__init__(config)
self.model = Olmo2Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@add_start_docstrings_to_model_forward(OLMO2_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
num_logits_to_keep: int = 0,
**kwargs: Unpack[KwargsForCausalLM],
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
num_logits_to_keep (`int`, *optional*):
Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
`input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, Olmo2ForCausalLM
        >>> model = Olmo2ForCausalLM.from_pretrained("allenai/Olmo2-7B-1124-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("allenai/Olmo2-7B-1124-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
class_definition
| 33,551 | 38,678 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modeling_olmo2.py
| null | 3,453 |
class Olmo2Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Olmo2Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
```python
>>> from transformers import Olmo2Model, Olmo2Config
>>> # Initializing a Olmo2 7B style configuration
>>> configuration = Olmo2Config()
>>> # Initializing a model from the Olmo2 7B style configuration
>>> model = Olmo2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "olmo2"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=50304,
hidden_size=4096,
intermediate_size=11008,
num_hidden_layers=32,
num_attention_heads=32,
num_key_value_heads=None,
hidden_act="silu",
max_position_embeddings=2048,
initializer_range=0.02,
use_cache=True,
pad_token_id=1,
bos_token_id=None,
eos_token_id=50279,
tie_word_embeddings=False,
rope_theta=10000.0,
rope_scaling=None,
attention_bias=False,
attention_dropout=0.0,
rms_norm_eps=1e-5,
**kwargs,
):
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rms_norm_eps = rms_norm_eps
def _rope_scaling_validation(self):
"""
Validate the `rope_scaling` configuration.
"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
)
rope_scaling_type = self.rope_scaling.get("type", None)
rope_scaling_factor = self.rope_scaling.get("factor", None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
)
if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
|
class_definition
| 870 | 8,561 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/configuration_olmo2.py
| null | 3,454 |
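`_rope_scaling_validation` only accepts a two-field dict with a `type` of `"linear"` or `"dynamic"` and a float `factor` strictly greater than 1. An illustrative instantiation, assuming a transformers version that exports `Olmo2Config`:
from transformers import Olmo2Config

config = Olmo2Config(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # passes validation
# Olmo2Config(rope_scaling={"type": "linear", "factor": 1.0}) would raise ValueError: the factor must be > 1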
class Olmo2Config(OlmoConfig):
r"""
This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Olmo2Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
```python
>>> from transformers import Olmo2Model, Olmo2Config
>>> # Initializing a Olmo2 7B style configuration
>>> configuration = Olmo2Config()
>>> # Initializing a model from the Olmo2 7B style configuration
>>> model = Olmo2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "olmo2"
def __init__(
self,
vocab_size=50304,
hidden_size=4096,
intermediate_size=11008,
num_hidden_layers=32,
num_attention_heads=32,
num_key_value_heads=None,
hidden_act="silu",
max_position_embeddings=2048,
initializer_range=0.02,
use_cache=True,
pad_token_id=1,
bos_token_id=None,
eos_token_id=50279,
tie_word_embeddings=False,
rope_theta=10000.0,
rope_scaling=None,
attention_bias=False,
attention_dropout=0.0,
rms_norm_eps=1e-5,
**kwargs,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
intermediate_size=intermediate_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
num_key_value_heads=num_key_value_heads,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
initializer_range=initializer_range,
use_cache=use_cache,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
rope_theta=rope_theta,
rope_scaling=rope_scaling,
attention_bias=attention_bias,
attention_dropout=attention_dropout,
**kwargs,
)
self.rms_norm_eps = rms_norm_eps
del self.clip_qkv
|
class_definition
| 551 | 6,987 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modular_olmo2.py
| null | 3,455 |
class Olmo2RMSNorm(LlamaRMSNorm):
pass
|
class_definition
| 6,990 | 7,032 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modular_olmo2.py
| null | 3,456 |
class Olmo2Attention(OlmoAttention):
def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None):
super().__init__(config, layer_idx=layer_idx)
self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim, config.rms_norm_eps)
self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim, config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
logger.warning_once(
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
else:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
class_definition
| 7,208 | 10,076 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modular_olmo2.py
| null | 3,457 |
class Olmo2DecoderLayer(OlmoDecoderLayer):
def __init__(self, config: Olmo2Config, layer_idx: int):
super().__init__(config, layer_idx=layer_idx)
self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx)
del self.input_layernorm
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
**kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
|
class_definition
| 10,267 | 12,311 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modular_olmo2.py
| null | 3,458 |
class Olmo2Model(OlmoModel):
def __init__(self, config: Olmo2Config):
super().__init__(config)
self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layers = nn.ModuleList(
[Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
|
class_definition
| 12,441 | 12,772 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modular_olmo2.py
| null | 3,459 |
class Olmo2ForCausalLM(OlmoForCausalLM):
pass
|
class_definition
| 12,860 | 12,909 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/olmo2/modular_olmo2.py
| null | 3,460 |
class Emu3ImageProcessor(BaseImageProcessor):
r"""
    Constructs an Emu3 image processor that dynamically resizes images based on their original sizes.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
        min_pixels (`int`, *optional*, defaults to `512 * 512`):
            The minimum number of pixels allowed in the resized image.
        max_pixels (`int`, *optional*, defaults to `1024 * 1024`):
            The maximum number of pixels allowed in the resized image.
        spatial_factor (`int`, *optional*, defaults to 8):
            The spatial downsampling factor by which the image is reduced during the feature extraction phase.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
do_convert_rgb: bool = True,
do_pad: bool = True,
min_pixels: int = 512 * 512,
max_pixels: int = 1024 * 1024,
spatial_factor: int = 8,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.min_pixels = min_pixels
self.max_pixels = max_pixels
self.spatial_factor = spatial_factor
self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels}
self.do_convert_rgb = do_convert_rgb
def _preprocess(
self,
images: Union[ImageInput, VideoInput],
do_resize: bool = None,
resample: PILImageResampling = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_normalize: bool = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
do_convert_rgb: bool = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
vision_info (`List[Dict]`, *optional*):
Optional list of dictionaries containing additional information about vision inputs.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_list_of_images(images)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
height, width = get_image_size(images[0], channel_dim=input_data_format)
resized_height, resized_width = height, width
processed_images = []
for image in images:
if do_resize:
resized_height, resized_width = smart_resize(
height,
width,
factor=self.spatial_factor,
min_pixels=self.min_pixels,
max_pixels=self.max_pixels,
)
image = resize(
image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
processed_images.append(image)
images = np.array(processed_images)
return images
def _pad_for_batching(
self,
pixel_values: List[np.ndarray],
image_sizes: List[List[int]],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.
Args:
pixel_values (`List[np.ndarray]`):
                An array of pixel values for each image, of shape (`batch_size`, `num_patches`, `image_in_3D`)
image_sizes (`List[List[int]]`):
A list of sizes for each image in `pixel_values` in (height, width) format.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
List[`np.ndarray`]: The padded images.
"""
max_shape = (
max([size[0] for size in image_sizes]),
max([size[1] for size in image_sizes]),
)
pixel_values = [
pad(
image,
padding=((0, max_shape[0] - size[0]), (0, max_shape[1] - size[1])),
data_format=data_format,
input_data_format=input_data_format,
)
for image, size in zip(pixel_values, image_sizes)
]
return pixel_values
def preprocess(
self,
images: ImageInput,
do_resize: bool = None,
size: Dict[str, int] = None,
resample: PILImageResampling = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_normalize: bool = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
do_convert_rgb: bool = None,
do_pad: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size constraints for the resized image, given as a dictionary with `"min_pixels"` and `"max_pixels"`
                keys that bound the total number of pixels while keeping the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_pad = do_pad if do_pad is not None else self.do_pad
if images is not None:
images = make_batched_images(images)
if images is not None and not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
validate_preprocess_arguments(
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
pixel_values = []
for image in images:
image = self._preprocess(
image,
do_resize=do_resize,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
do_convert_rgb=do_convert_rgb,
input_data_format=input_data_format,
)
pixel_values.extend(image)
image_sizes = [image.shape[-2:] for image in pixel_values]
if do_pad:
pixel_values = self._pad_for_batching(pixel_values, image_sizes)
pixel_values = np.array(pixel_values)
return BatchFeature(
data={"pixel_values": pixel_values, "image_sizes": image_sizes}, tensor_type=return_tensors
)
def postprocess(
self,
images: ImageInput,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
return_tensors: Union[str, TensorType] = "PIL.Image.Image",
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
        Postprocess an image or a batch of image tensors. Postprocessing is the reverse of preprocessing, and the
        parameters should be the same as in `preprocess`.
Args:
images (`ImageInput`):
Image to postprocess. Expects a single or batch of images with pixel values ranging from -1 to 1.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_list_of_images(images)
if isinstance(images[0], Image.Image):
return images if len(images) > 1 else images[0]
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
pixel_values = []
for image in images:
image = to_numpy_array(image)
if do_normalize:
image = self.unnormalize(
image=image, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
image = image.clip(0, 255).astype(np.uint8)
if do_normalize and do_rescale and return_tensors == "PIL.Image.Image":
image = to_channel_dimension_format(image, ChannelDimension.LAST, input_channel_dim=input_data_format)
pixel_values.append(Image.fromarray(image))
else:
pixel_values.extend(image)
data = {"pixel_values": pixel_values}
return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None
return BatchFeature(data=data, tensor_type=return_tensors)
def unnormalize(
self,
image: np.array,
image_mean: Union[float, Iterable[float]],
image_std: Union[float, Iterable[float]],
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.array:
"""
Unnormalizes `image` using the mean and standard deviation specified by `image_mean` and `image_std`.
image = (image * image_std) + image_mean
Args:
image (`np.ndarray` of shape `(batch_size, num_channels, image_size, image_size)` or `(num_channels, image_size, image_size)`):
Batch of pixel values to postprocess.
image_mean (`float` or `Iterable[float]`):
The mean to use for unnormalization.
image_std (`float` or `Iterable[float]`):
The standard deviation to use for unnormalization.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
num_channels = 3
if isinstance(image_mean, Iterable):
if len(image_mean) != num_channels:
raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(image_mean)}")
else:
image_mean = [image_mean] * num_channels
if isinstance(image_std, Iterable):
if len(image_std) != num_channels:
raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(image_std)}")
else:
image_std = [image_std] * num_channels
rev_image_mean = tuple(-mean / std for mean, std in zip(image_mean, image_std))
rev_image_std = tuple(1 / std for std in image_std)
image = self.normalize(
image=image, mean=rev_image_mean, std=rev_image_std, input_data_format=input_data_format
)
return image
|
class_definition
| 3,437 | 27,819 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/image_processing_emu3.py
| null | 3,461 |
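A quick sketch of the trick `unnormalize` relies on: since `normalize` computes `(x - mean) / std`, calling it again with `mean' = -mean / std` and `std' = 1 / std` recovers `x * std + mean`. The NumPy snippet below checks that identity on toy data; the statistics are arbitrary illustration values, not Emu3's.

```python
import numpy as np

# Arbitrary illustration statistics, not Emu3's actual normalization values.
image_mean = np.array([0.5, 0.5, 0.5])
image_std = np.array([0.5, 0.5, 0.5])

image = np.random.rand(3, 4, 4).astype(np.float32)        # toy channels-first image
normalized = (image - image_mean[:, None, None]) / image_std[:, None, None]

# "Unnormalize" by normalizing again with reversed parameters, as unnormalize() does.
rev_mean = -image_mean / image_std
rev_std = 1.0 / image_std
recovered = (normalized - rev_mean[:, None, None]) / rev_std[:, None, None]

assert np.allclose(recovered, image, atol=1e-6)
```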
class Emu3TextKwargs(TextKwargs, total=False):
return_for_image_generation: bool
|
class_definition
| 925 | 1,009 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/processing_emu3.py
| null | 3,462 |
class Emu3ImagesKwargs(ImagesKwargs, total=False):
ratio: str
image_area: int
|
class_definition
| 1,012 | 1,097 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/processing_emu3.py
| null | 3,463 |
class Emu3ProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: Emu3TextKwargs
images_kwargs: Emu3ImagesKwargs
_defaults = {
"text_kwargs": {
"return_for_image_generation": False,
},
"images_kwargs": {
"ratio": "1:1",
"image_area": 518400,
},
}
|
class_definition
| 1,100 | 1,435 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/processing_emu3.py
| null | 3,464 |
class Emu3Processor(ProcessorMixin):
r"""
Constructs an Emu3 processor which wraps an Emu3 image processor and a GPT2 tokenizer into a single
processor.
[`Emu3Processor`] offers all the functionalities of [`Emu3ImageProcessor`] and [`GPT2TokenizerFast`].
See the [`~Emu3Processor.__call__`] and [`~Emu3Processor.decode`] for more information.
Args:
image_processor ([`Emu3ImageProcessor`]):
The image processor is a required input.
tokenizer ([`Emu3TokenizerFast`]):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ["image_processor", "tokenizer"]
tokenizer_class = ("GPT2Tokenizer", "GPT2TokenizerFast")
image_processor_class = "Emu3ImageProcessor"
def __init__(
self,
image_processor,
tokenizer,
chat_template=None,
**kwargs,
):
self.image_token = tokenizer.image_token # image_token as placeholder to be replaced by vq-vae tokens
self.image_start_token = tokenizer.boi_token # "<|image start|>" fixed tokens for start and end of image
self.image_end_token = tokenizer.eoi_token # "<|image end|>"
self.fake_token_around_image = tokenizer.image_wrapper_token # "<|image token|>" every image starts with it
self.eof_token = tokenizer.eof_token # "<|extra_201|>"
self.bos_token = tokenizer.bos_token
self.downsample_ratio = 8
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
audio=None,
videos=None,
**kwargs: Unpack[Emu3ProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to Emu3TokenizerFast's [`~Emu3TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
Emu3ImageProcessor's [`~Emu3ImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
# check if images and text inputs are reversed for BC
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
output_kwargs = self._merge_kwargs(
Emu3ProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
return_for_image_generation = output_kwargs["text_kwargs"].pop("return_for_image_generation", False)
ratio = output_kwargs["images_kwargs"].pop("ratio", None)
image_area = output_kwargs["images_kwargs"].pop("image_area", None)
if return_for_image_generation and images is not None:
raise ValueError("You should not provide `images` when `return_for_image_generation=True`")
if not return_for_image_generation and text is None and images is None:
raise ValueError("You must provide either text or images when `return_for_image_generation=False`")
image_features = {}
image_start_tokens = f"{self.image_start_token}"
image_end_tokens = f"{self.eof_token}{self.image_end_token}"
# generate text from image + text input, so we add placeholders for image tokens
if not return_for_image_generation and images is not None:
image_features = self.image_processor(images, **output_kwargs["images_kwargs"])
image_sizes = iter(image_features.image_sizes)
prompt_strings = []
for sample in text:
while self.image_token in sample:
image_size = next(image_sizes)
height, width = image_size
height = height // self.downsample_ratio
width = width // self.downsample_ratio
image_seq_length = height * (width + 1) # +1 for extra row when converting to BPE in modeling code
image_placeholder = f"{image_start_tokens}{height}*{width}{self.fake_token_around_image}{'<placeholder>' * image_seq_length}{image_end_tokens}"
sample = sample.replace(self.image_token, image_placeholder, 1)
sample = f"{self.bos_token}{sample}" # add BOS because PT tokenizer doesn't add it
prompt_strings.append(sample)
text = [sample.replace("<placeholder>", self.image_token) for sample in prompt_strings]
# generate image from text input, so we add begin-of-image tokens from where image generation starts
elif return_for_image_generation:
height, width = self.calculate_generate_size(ratio, image_area, self.downsample_ratio)
image_prompt = f"{image_start_tokens}{height}*{width}{self.fake_token_around_image}"
text = [f"{self.bos_token}{sample}{image_prompt}" for sample in text]
image_features["image_sizes"] = [[height, width]] * len(text)
# else just generate from text-only input, and we do no special treatment for text
data = self.tokenizer(text, **output_kwargs["text_kwargs"])
data.update(**image_features)
return BatchFeature(data=data, tensor_type=output_kwargs["common_kwargs"]["return_tensors"])
def calculate_generate_size(self, ratio, image_area, spatial_factor):
width, height = map(int, ratio.split(":"))
current_area = width * height
target_ratio = (image_area / current_area) ** 0.5
token_height = int(round(height * target_ratio / spatial_factor))
token_width = int(round(width * target_ratio / spatial_factor))
return token_height, token_width
def postprocess(self, images: ImageInput, **kwargs):
return self.image_processor.postprocess(images, **kwargs)
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to Emu3TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to Emu3TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
|
class_definition
| 1,438 | 10,384 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/processing_emu3.py
| null | 3,465 |
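To make the `calculate_generate_size` arithmetic concrete, here is the same computation reproduced standalone, using the processor's default `ratio="1:1"` and `image_area=518400` together with the `downsample_ratio` of 8 used above (a sketch for illustration, not part of the library):

```python
def calculate_generate_size(ratio: str, image_area: int, spatial_factor: int):
    width, height = map(int, ratio.split(":"))
    current_area = width * height
    target_ratio = (image_area / current_area) ** 0.5
    token_height = int(round(height * target_ratio / spatial_factor))
    token_width = int(round(width * target_ratio / spatial_factor))
    return token_height, token_width

# The default 1:1 ratio and 518400-pixel area correspond to a 720 x 720 image,
# which becomes a 90 x 90 latent grid with a spatial downsample factor of 8.
print(calculate_generate_size("1:1", 518400, 8))   # (90, 90)
# A 4:3 ratio keeps roughly the same area: sqrt(518400 / 12) ~= 207.8 per ratio unit.
print(calculate_generate_size("4:3", 518400, 8))   # (78, 104)
```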
class Emu3VQVAEConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Emu3VQVAE`]. It is used to instantiate an VQ-VAE
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a configuration similar to that of the VQ model presented in the Emu3 paper.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
codebook_size (`int`, *optional*, defaults to 32768):
Codebook size of the VQ model.
embed_dim (`int`, *optional*, defaults to 4):
Dimension of the quantized vector in codebook.
latent_channels (`int`, *optional*, defaults to 4):
Dimension of the output channels of the encoder and the input channels of the decoder.
double_latent (`bool`, *optional*, defaults to `False`):
Whether to double the output dimension of the encoder.
in_channels (`int`, *optional*, defaults to 3):
Input channel of encoder.
out_channels (`int`, *optional*, defaults to 3):
Output channel of decoder.
temporal_downsample_factor (`int`, *optional*, defaults to 4):
Temporal downsample factor.
base_channels (`int`, *optional*, defaults to 256):
Basic channel number of the intermediate blocks.
channel_multiplier (`List[int]`, *optional*, defaults to `[1, 2, 2, 4]`):
Channel scaling factor of the intermediate blocks.
num_res_blocks (`int`, *optional*, defaults to 2):
Residual block number in each stage.
attn_resolutions (`List[int]`, *optional*, defaults to `[3]`):
Stage indices to apply attention.
hidden_size (`int`, *optional*, defaults to 1024):
Dimension of the hidden representations in the attention layer.
num_attention_heads (`int`, *optional*, defaults to 1):
Number of attention heads for each attention layer.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import Emu3VQVAE, Emu3VQVAEConfig
>>> # Initializing a video VQ model of Emu3 configuration
>>> configuration = Emu3VQVAEConfig()
>>> # Initializing a model from the Emu3 VQ model style configuration
>>> model = Emu3VQVAE(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "emu3_vqgan"
base_config_key = "vq_config"
def __init__(
self,
codebook_size: int = 32768,
embed_dim: int = 4,
latent_channels: int = 4,
double_latent: bool = False,
in_channels: int = 3,
out_channels: int = 3,
temporal_downsample_factor: int = 4,
base_channels: int = 256,
channel_multiplier: List[int] = [1, 2, 2, 4],
num_res_blocks: int = 2,
attn_resolutions: List[int] = [3],
hidden_size: int = 1024,
num_attention_heads: int = 1,
attention_dropout: float = 0.0,
**kwargs,
):
super().__init__(**kwargs)
self.codebook_size = codebook_size
self.embed_dim = embed_dim
self.latent_channels = latent_channels
self.double_latent = double_latent
self.in_channels = in_channels
self.out_channels = out_channels
self.temporal_downsample_factor = temporal_downsample_factor
self.base_channels = base_channels
self.channel_multiplier = channel_multiplier
self.num_res_blocks = num_res_blocks
self.attn_resolutions = attn_resolutions
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attention_dropout = attention_dropout
|
class_definition
| 785 | 4,735 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/configuration_emu3.py
| null | 3,466 |
class Emu3TextConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Emu3TextModel`]. It is used to instantiate a
emu3 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 184622):
Vocabulary size of the Emu3 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Emu3Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 9216):
The maximum sequence length that this model might ever be used with. Emu3 supports up to 9216 tokens.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 151643):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 151849):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 151850):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 1000000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`List[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`List[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
```python
>>> from transformers import Emu3Model, Emu3Config
>>> # Initializing a Emu3-community/Emu3-Chat-hf style configuration
>>> configuration = Emu3Config()
>>> # Initializing a model from the Emu3-community/Emu3-Chat-hf style configuration
>>> model = Emu3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "emu3_text_model"
base_config_key = "text_config"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size: int = 184622,
hidden_size: int = 4096,
intermediate_size: int = 14336,
num_hidden_layers: int = 32,
num_attention_heads: int = 32,
num_key_value_heads: Optional[int] = 8,
hidden_act: str = "silu",
max_position_embeddings: int = 9216,
rms_norm_eps: float = 1e-5,
use_cache: bool = True,
pad_token_id: int = 151643,
bos_token_id: int = 151849,
eos_token_id: int = 151850,
tie_word_embeddings: bool = False,
rope_theta: float = 1000000.0,
rope_scaling: Optional[Dict] = None,
mlp_bias=False,
attention_bias=False,
attention_dropout: float = 0.1,
initializer_range: float = 0.02,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.mlp_bias = mlp_bias
self.attention_bias = attention_bias
self.initializer_range = initializer_range
rope_config_validation(self)
self.attention_dropout = attention_dropout
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
|
class_definition
| 4,738 | 14,024 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/configuration_emu3.py
| null | 3,467 |
class Emu3Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Emu3Model`]. It is used to instantiate an
Emu3 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vq_config (`Union[Dict, Emu3VQVAEConfig]`, *optional*):
Emu3VQVAEConfig instance containing the configuration for the VQ-VAE model.
text_config (`Union[Dict, Emu3TextConfig]`, *optional*):
Emu3TextConfig instance containing the configuration for the language model.
vocabulary_map (`dict`, *optional*):
A dictionary containing the vocabulary map from the tokenizer. Used to obtain tokens from the image inputs.
"""
model_type = "emu3"
keys_to_ignore_at_inference = ["past_key_values"]
sub_configs = {"text_config": Emu3TextConfig, "vq_config": Emu3VQVAEConfig}
def __init__(
self,
vq_config: Union[Dict, Emu3VQVAEConfig] = None,
text_config: Union[Dict, Emu3TextConfig] = None,
vocabulary_map: Dict[int, int] = None,
**kwargs,
):
if vq_config is None:
vq_config = Emu3VQVAEConfig()
elif isinstance(vq_config, dict):
vq_config = Emu3VQVAEConfig(**vq_config)
if text_config is None:
text_config = Emu3TextConfig()
elif isinstance(text_config, dict):
text_config = Emu3TextConfig(**text_config)
self.vq_config = vq_config
self.text_config = text_config
self.vocabulary_map = vocabulary_map
super().__init__(**kwargs)
|
class_definition
| 14,027 | 16,006 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/configuration_emu3.py
| null | 3,468 |
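A minimal usage sketch of `Emu3Config`'s composition behaviour, assuming the classes are exported from `transformers` as in the docstring examples above: sub-configurations can be passed either as config objects or as plain dicts, and dicts are promoted to the matching config class by `__init__`.

```python
from transformers import Emu3Config, Emu3VQVAEConfig

# Sub-configurations can be passed as config objects or as plain dicts;
# dicts are promoted to the corresponding config classes by __init__.
config = Emu3Config(
    vq_config=Emu3VQVAEConfig(codebook_size=32768),
    text_config={"hidden_size": 4096, "num_hidden_layers": 32},
)
print(type(config.vq_config).__name__, type(config.text_config).__name__)
# Emu3VQVAEConfig Emu3TextConfig
```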
class Emu3DecoderLayer(LlamaDecoderLayer):
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__(config, layer_idx)
self.dropout = nn.Dropout(config.attention_dropout)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
into the model
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + self.dropout(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
|
class_definition
| 1,822 | 4,891 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,469 |
class Emu3VQVAEVectorQuantizer(nn.Module):
"""
A module for vector quantization using learned embedding vectors.
This module implements the quantization process similar to the one described in
the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
input vectors into discrete codebook vectors, which are learned during training.
Current implementation improves over previous ones by avoiding costly matrix multiplications
and allowing for post-hoc remapping of indices.
"""
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
self.embedding = nn.Embedding(config.codebook_size, config.embed_dim)
self.embedding.weight.data.uniform_(-1.0 / config.codebook_size, 1.0 / config.codebook_size)
def forward(self, hidden_state: torch.Tensor):
batch_size, temporal, channels, height, width = hidden_state.shape
hidden_state = hidden_state.permute(0, 1, 3, 4, 2).contiguous()
hidden_state_flattened = hidden_state.view(-1, channels)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
hidden_state_sum = torch.sum(hidden_state_flattened**2, dim=1, keepdim=True)
embedding_sum = torch.sum(self.embedding.weight**2, dim=1)
# "bd,dn->bn",
distances = 2 * torch.matmul(hidden_state_flattened, self.embedding.weight.transpose(0, 1))
distances = hidden_state_sum + embedding_sum - distances
min_encoding_indices = torch.argmin(distances, dim=1)
min_encoding_indices = min_encoding_indices.view(batch_size, temporal, height, width)
return min_encoding_indices
|
class_definition
| 4,894 | 6,561 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,470 |
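The quantizer above avoids materialising the full pairwise difference tensor by expanding the squared Euclidean distance as `||z||^2 + ||e||^2 - 2 z·e`, which needs only one matrix multiplication against the codebook. A small standalone check of that identity, with toy shapes rather than Emu3's real codebook size:

```python
import torch

num_codes, dim = 16, 4                        # toy codebook, far smaller than Emu3's
codebook = torch.randn(num_codes, dim)
z = torch.randn(10, dim)                      # flattened latent vectors

# Expanded form used by the quantizer: ||z||^2 + ||e||^2 - 2 z e^T
d_expanded = (z ** 2).sum(dim=1, keepdim=True) + (codebook ** 2).sum(dim=1) - 2 * z @ codebook.T
# Naive pairwise form for comparison
d_naive = ((z[:, None, :] - codebook[None, :, :]) ** 2).sum(dim=-1)

assert torch.allclose(d_expanded, d_naive, atol=1e-5)
indices = d_expanded.argmin(dim=1)            # nearest codebook entry per latent vector
```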
class Emu3VQVAEEncoderConvDownsample(ChameleonVQVAEEncoderConvDownsample):
pass
|
class_definition
| 6,564 | 6,647 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,471 |
class Emu3VQVAEEncoderConvUpsample(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
def forward(self, hidden_states):
hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
hidden_states = self.conv(hidden_states)
return hidden_states
|
class_definition
| 6,650 | 7,056 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,472 |
class Emu3VQVAEConv3d(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel_size: Tuple[int],
stride: Tuple[int],
):
super().__init__()
padding_sizes = [one_kernel - one_stride for one_kernel, one_stride in zip(kernel_size[1:], stride[1:])]
self.padding = ()
for pad_size in padding_sizes[::-1]:
self.padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
self.padding += (2, 0)
self.conv = nn.Conv3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
)
def forward(self, hidden_states: torch.Tensor):
hidden_states = F.pad(hidden_states, self.padding)
hidden_states = self.conv(hidden_states)
return hidden_states
|
class_definition
| 7,059 | 7,893 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,473 |
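`Emu3VQVAEConv3d` pads manually with `F.pad` instead of using the convolution's `padding` argument: the spatial axes get "same"-style padding split across both sides (with the extra pixel on the front when the amount is odd), while the temporal axis is always padded with `(2, 0)`, i.e. only in front. The sketch below reproduces the constructor's loop for the temporal-downsample case (`kernel_size=(4, 3, 3)`, `stride=(2, 1, 1)`):

```python
kernel_size, stride = (4, 3, 3), (2, 1, 1)               # Emu3VQVAETemporalDownsample case

padding_sizes = [k - s for k, s in zip(kernel_size[1:], stride[1:])]   # spatial axes: [2, 2]
padding = ()
for pad_size in padding_sizes[::-1]:                     # F.pad expects the last dim (width) first
    padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
padding += (2, 0)                                        # temporal axis: pad the front only

# (width_left, width_right, height_left, height_right, time_front, time_back)
print(padding)                                           # (1, 1, 1, 1, 2, 0)
```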
class Emu3VQVAESpatialNorm(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
):
super().__init__()
self.norm_layer = nn.GroupNorm(
num_channels=out_channels,
num_groups=32,
eps=1e-6,
affine=True,
)
self.conv_y = nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
)
self.conv_b = nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
)
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
quant_states = F.interpolate(quant_states, size=hidden_states.shape[-2:], mode="nearest")
hidden_states = self.norm_layer(hidden_states)
hidden_states = hidden_states * self.conv_y(quant_states) + self.conv_b(quant_states)
return hidden_states
|
class_definition
| 7,896 | 8,906 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,474 |
class Emu3VQVAETemporalUpsample(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
):
super().__init__()
self.conv = Emu3VQVAEConv3d(
in_channel,
out_channel,
kernel_size=(3, 3, 3),
stride=(1, 1, 1),
)
def forward(self, hidden_states: torch.Tensor):
batch_size, channels, temporal, height, width = hidden_states.shape
hidden_states = hidden_states.permute(0, 1, 3, 4, 2).contiguous().view(batch_size, -1, temporal)
hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
hidden_states = hidden_states.view(batch_size, channels, height, width, -1).permute(0, 1, 4, 2, 3).contiguous()
hidden_states = self.conv(hidden_states)
return hidden_states
|
class_definition
| 8,909 | 9,749 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,475 |
class Emu3VQVAETemporalDownsample(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
):
super().__init__()
self.conv = Emu3VQVAEConv3d(
in_channel,
out_channel,
kernel_size=(4, 3, 3),
stride=(2, 1, 1),
)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.conv(hidden_states)
return hidden_states
|
class_definition
| 9,752 | 10,206 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,476 |
class Emu3VQVAETemporalResnetBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels=None,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = in_channels if out_channels is None else out_channels
self.norm1 = nn.BatchNorm3d(in_channels)
self.conv1 = Emu3VQVAEConv3d(
in_channels,
out_channels,
kernel_size=(3, 3, 3),
stride=(1, 1, 1),
)
self.norm2 = nn.BatchNorm3d(out_channels)
self.conv2 = Emu3VQVAEConv3d(
out_channels,
out_channels,
kernel_size=(3, 3, 3),
stride=(1, 1, 1),
)
if self.in_channels != self.out_channels:
self.nin_shortcut = nn.Conv3d(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
)
def forward(self, hidden_states):
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm2(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.in_channels != self.out_channels:
residual = self.nin_shortcut(residual)
return residual + hidden_states
|
class_definition
| 10,209 | 11,693 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,477 |
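The recurring `hidden_states *= torch.sigmoid(hidden_states)` pattern in these blocks is the SiLU/swish activation written as an in-place multiply; the right-hand side is evaluated before the multiplication, so the result equals `x * sigmoid(x)`. A one-line sanity check:

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 3)
y = x.clone()
y *= torch.sigmoid(y)          # in-place form used in the VQ-VAE blocks
assert torch.allclose(y, F.silu(x))
```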
class Emu3VQVAEResnetBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: Optional[int] = None,
quant_channels: Optional[int] = None,
):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.quant_channels = quant_channels
if quant_channels is None:
self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=32, eps=1e-6, affine=True)
self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=32, eps=1e-6, affine=True)
else:
self.norm1 = Emu3VQVAESpatialNorm(quant_channels, in_channels)
self.norm2 = Emu3VQVAESpatialNorm(quant_channels, out_channels)
self.conv1 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
)
if self.in_channels != self.out_channels:
self.nin_shortcut = nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
)
def forward(self, hidden_states: torch.Tensor, quant_channels: Optional[torch.Tensor] = None):
norm_args = () if self.quant_channels is None else (quant_channels,)
residual = hidden_states
hidden_states = self.norm1(hidden_states, *norm_args)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm2(hidden_states, *norm_args)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.in_channels != self.out_channels:
residual = self.nin_shortcut(residual)
return residual + hidden_states
|
class_definition
| 11,696 | 13,795 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,478 |
class Emu3VQVAEAttentionBlock(SiglipAttention):
pass
|
class_definition
| 13,798 | 13,854 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,479 |
class Emu3VQVAEGroupNorm(nn.GroupNorm):
"""
Same as the torch GroupNorm with the only difference that this one accepts
an optional kwarg `quant_states` which is not used. This class makes it easier to
use SpatialNorm or GroupNorm without conditionals.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, input, quant_states=None):
return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
|
class_definition
| 13,857 | 14,338 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,480 |
class Emu3VQVAEMiddleBlock(nn.Module):
def __init__(self, config, in_channels, quant_channels=None):
super().__init__()
self.block_1 = Emu3VQVAEResnetBlock(
in_channels=in_channels,
out_channels=in_channels,
quant_channels=quant_channels,
)
self.attn_1 = Emu3VQVAEAttentionBlock(config)
if quant_channels is None:
self.attn_norm = Emu3VQVAEGroupNorm(num_channels=in_channels, num_groups=32, eps=1e-6, affine=True)
else:
self.attn_norm = Emu3VQVAESpatialNorm(quant_channels, in_channels)
self.block_2 = Emu3VQVAEResnetBlock(
in_channels=in_channels,
out_channels=in_channels,
quant_channels=quant_channels,
)
def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor = None):
hidden_states = self.block_1(hidden_states, quant_states)
residual = hidden_states
hidden_states = self.attn_norm(hidden_states, quant_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = self.attn_1(hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
hidden_states = self.block_2(hidden_states, quant_states)
return hidden_states
|
class_definition
| 14,341 | 15,843 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,481 |
class Emu3VQVAEDownBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_resolutions = len(config.channel_multiplier)
self.num_res_blocks = config.num_res_blocks
base_channels = config.base_channels
channel_multiplier = config.channel_multiplier
in_channel_multiplier = (1,) + tuple(channel_multiplier)
self.in_channel_multiplier = in_channel_multiplier
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
attn_norms = nn.ModuleList()
block_in = base_channels * in_channel_multiplier[i_level]
block_out = base_channels * channel_multiplier[i_level]
for i_block in range(self.num_res_blocks):
block.append(
Emu3VQVAEResnetBlock(
in_channels=block_in,
out_channels=block_out,
)
)
block_in = block_out
if config.attn_resolutions is not None and i_level in config.attn_resolutions:
attn.append(Emu3VQVAEAttentionBlock(config))
attn_norms.append(nn.GroupNorm(num_channels=block_in, num_groups=32, eps=1e-6, affine=True))
down = nn.Module()
down.block = block
down.attn = attn
down.attn_norms = attn_norms
if i_level != self.num_resolutions - 1:
down.downsample = Emu3VQVAEEncoderConvDownsample(block_in)
self.down.append(down)
def forward(self, hidden_states: torch.FloatTensor):
for i_level, blocks in enumerate(self.down):
for i_block in range(self.num_res_blocks):
hidden_states = blocks.block[i_block](hidden_states)
if len(blocks.attn) > 0:
residual = hidden_states
hidden_states = blocks.attn_norms[i_block](hidden_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = blocks.attn[i_block](hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
if i_level != self.num_resolutions - 1:
hidden_states = blocks.downsample(hidden_states)
return hidden_states
|
class_definition
| 15,846 | 18,472 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,482 |
class Emu3VQVAEUpBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_resolutions = len(config.channel_multiplier)
self.num_res_blocks = config.num_res_blocks
quant_channels = config.embed_dim
block_in = config.base_channels * config.channel_multiplier[-1]
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
attn_norms = nn.ModuleList()
block_out = config.base_channels * config.channel_multiplier[i_level]
for i_block in range(self.num_res_blocks + 1):
block.append(
Emu3VQVAEResnetBlock(
in_channels=block_in,
out_channels=block_out,
quant_channels=quant_channels,
)
)
block_in = block_out
if i_level in config.attn_resolutions:
attn.append(Emu3VQVAEAttentionBlock(config))
attn_norms.append(Emu3VQVAESpatialNorm(quant_channels, block_in))
up = nn.Module()
up.block = block
up.attn = attn
up.attn_norms = attn_norms
if i_level != 0:
up.upsample = Emu3VQVAEEncoderConvUpsample(block_in)
self.up.insert(0, up)
def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor):
for i_level, blocks in enumerate(self.up[::-1]):
for i_block in range(self.num_res_blocks + 1):
hidden_states = blocks.block[i_block](hidden_states, quant_states)
if len(blocks.attn) > 0:
residual = hidden_states
hidden_states = blocks.attn_norms[i_block](hidden_states, quant_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = blocks.attn[i_block](hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
if i_level != len(self.up) - 1:
hidden_states = blocks.upsample(hidden_states)
return hidden_states
|
class_definition
| 18,475 | 20,955 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,483 |
class Emu3VQVAEEncoder(nn.Module):
def __init__(self, config):
super().__init__()
base_channels = config.base_channels
in_channels = config.in_channels
double_latent = config.double_latent
latent_channels = config.latent_channels
channel_multiplier = config.channel_multiplier
out_channels = 2 * latent_channels if double_latent else latent_channels
block_in = base_channels * channel_multiplier[-1]
self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1)
self.down_block = Emu3VQVAEDownBlock(config)
self.middle_block = Emu3VQVAEMiddleBlock(config, block_in)
self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
self.conv_out = torch.nn.Conv2d(
block_in,
out_channels,
kernel_size=3,
stride=1,
padding=1,
)
temporal_down_blocks = int(math.log2(config.temporal_downsample_factor))
self.time_conv = nn.ModuleList()
self.time_res_stack = nn.ModuleList()
for i in range(temporal_down_blocks):
conv = Emu3VQVAETemporalDownsample(out_channels, out_channels)
self.time_conv.append(conv)
for _ in range(config.num_res_blocks):
time_res_conv = Emu3VQVAETemporalResnetBlock(
in_channels=out_channels,
out_channels=out_channels,
)
self.time_res_stack.append(time_res_conv)
def forward(self, pixel_values: torch.LongTensor):
temporal_dim = pixel_values.shape[1]
pixel_values = pixel_values.reshape(-1, *pixel_values.shape[2:])
# downsampling & middle
hidden_states = self.conv_in(pixel_values)
hidden_states = self.down_block(hidden_states)
hidden_states = self.middle_block(hidden_states)
# end
hidden_states = self.norm_out(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv_out(hidden_states)
hidden_states = hidden_states.reshape(-1, temporal_dim, *hidden_states.shape[1:])
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
# temporal convs
for conv in self.time_conv:
hidden_states = conv(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
for layer in self.time_res_stack:
hidden_states = layer(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
return hidden_states
|
class_definition
| 20,958 | 23,565 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,484 |
class Emu3VQVAEDecoder(nn.Module):
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
quant_channels = config.embed_dim
block_in = config.base_channels * config.channel_multiplier[-1]
self.time_res_stack = nn.ModuleList()
for _ in range(config.num_res_blocks):
time_res_conv = Emu3VQVAETemporalResnetBlock(
in_channels=config.latent_channels, out_channels=config.latent_channels
)
self.time_res_stack.append(time_res_conv)
temp_upsample_block_num = int(math.log2(config.temporal_downsample_factor))
self.time_conv = nn.ModuleList()
for i in range(temp_upsample_block_num):
conv = Emu3VQVAETemporalUpsample(config.latent_channels, config.latent_channels)
self.time_conv.append(conv)
self.conv_in = nn.Conv2d(
config.latent_channels,
block_in,
kernel_size=3,
stride=1,
padding=1,
)
self.middle_block = Emu3VQVAEMiddleBlock(config, block_in, quant_channels=quant_channels)
self.up_block = Emu3VQVAEUpBlock(config)
block_in = config.base_channels * config.channel_multiplier[0]
self.norm_out = Emu3VQVAESpatialNorm(quant_channels, block_in)
self.conv_out = nn.Conv2d(
block_in,
config.out_channels,
kernel_size=3,
stride=1,
padding=1,
)
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
hidden_quant_states = torch.cat((hidden_states, quant_states), dim=0)
hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
# temporal convs
for layer in self.time_res_stack:
hidden_quant_states = layer(hidden_quant_states)
for layer in self.time_conv:
hidden_quant_states = layer(hidden_quant_states)
hidden_quant_states *= torch.sigmoid(hidden_quant_states)
hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
hidden_states, quant_states = torch.chunk(hidden_quant_states, 2, dim=0)
hidden_states = hidden_states.reshape(-1, *hidden_states.shape[2:])
quant_states = quant_states.reshape(-1, *quant_states.shape[2:])
hidden_states = self.conv_in(hidden_states)
# middle & upsampling
hidden_states = self.middle_block(hidden_states, quant_states)
hidden_states = self.up_block(hidden_states, quant_states)
hidden_states = self.norm_out(hidden_states, quant_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv_out(hidden_states)
return hidden_states
|
class_definition
| 23,568 | 26,307 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,485 |
class Emu3VQVAE(PreTrainedModel):
config_class = Emu3VQVAEConfig
base_model_prefix = "emuvideovq"
main_input_name = "pixel_values"
_no_split_modules = [
"Emu3VQVAETemporalResnetBlock",
"Emu3VQVAEAttentionBlock",
"Emu3VQVAEResnetBlock",
"Emu3VQVAEVectorQuantizer",
]
def _init_weights(self, module):
if isinstance(module, (nn.Conv2d, nn.Conv3d)):
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(module, nn.Linear):
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
if module.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(module.bias, -bound, bound)
elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
def __init__(self, config: Emu3VQVAEConfig):
super().__init__(config)
self.config = config
self.encoder = Emu3VQVAEEncoder(config)
self.decoder = Emu3VQVAEDecoder(config)
self.quantize = Emu3VQVAEVectorQuantizer(config)
self.vision_spatial_factor = 2 ** (len(config.channel_multiplier) - 1)
self.quant_conv = Emu3VQVAEConv3d(
config.latent_channels, config.embed_dim, kernel_size=(3, 1, 1), stride=(1, 1, 1)
)
self.post_quant_conv = Emu3VQVAEConv3d(
config.embed_dim, config.latent_channels, kernel_size=(3, 1, 1), stride=(1, 1, 1)
)
self.spatial_scale_factor = 2 ** (len(config.channel_multiplier) - 1)
self.eval() # Emu3's VQ model is frozen
self.post_init()
def encode(self, pixel_values: torch.Tensor, image_sizes: torch.Tensor):
is_image = pixel_values.ndim == 4
if is_image:
temporal = self.config.temporal_downsample_factor
batch_size, channels, height, width = pixel_values.shape
pixel_values = pixel_values.unsqueeze(1).repeat(1, temporal, 1, 1, 1)
else:
batch_size, temporal, channels, height, width = pixel_values.shape
hidden_states = self.encoder(pixel_values)
# b t c h w -> b c t h w
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
hidden_states = self.quant_conv(hidden_states)
# b c t h w -> b t c h w
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
codes = self.quantize(hidden_states)
image_tokens = codes.squeeze(1) if is_image else codes
image_tokens = [
single_image[: int(size[0] / self.vision_spatial_factor), : int(size[1] / self.vision_spatial_factor)]
for single_image, size in zip(image_tokens, image_sizes)
]
return image_tokens
def decode(self, hidden_states: torch.Tensor):
is_image = hidden_states.ndim == 3
if is_image:
hidden_states = hidden_states.unsqueeze(1)
batch_size, temporal, height, width = hidden_states.shape
quant = self.quantize.embedding(hidden_states.flatten())
channels = quant.shape[-1]
quant = quant.view(batch_size, temporal, height, width, channels).permute(0, 4, 1, 2, 3).contiguous()
post_quant = self.post_quant_conv(quant)
quant = quant.permute(0, 2, 1, 3, 4)
post_quant = post_quant.permute(0, 2, 1, 3, 4)
video = self.decoder(post_quant, quant)
video = video.reshape(
batch_size,
temporal * self.config.temporal_downsample_factor,
self.config.out_channels,
height * self.spatial_scale_factor,
width * self.spatial_scale_factor,
)
return video[:, 0] if is_image else video
|
class_definition
| 27,579 | 31,473 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,486 |
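A small numeric sketch of the spatial bookkeeping in `Emu3VQVAE.encode`: with the default `channel_multiplier=[1, 2, 2, 4]`, `vision_spatial_factor` is 8, so a 720 x 720 preprocessed image maps to a 90 x 90 grid of discrete tokens, consistent with the processor's `downsample_ratio` of 8 (the image size here is illustrative only):

```python
channel_multiplier = [1, 2, 2, 4]                         # Emu3VQVAEConfig default
vision_spatial_factor = 2 ** (len(channel_multiplier) - 1)
print(vision_spatial_factor)                              # 8

height, width = 720, 720                                  # illustrative preprocessed image size
print(height // vision_spatial_factor, width // vision_spatial_factor)   # 90 90
```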
class Emu3ImageVocabularyMapping:
"""
A class for mapping discrete image tokens from VQGAN to BPE tokens.
"""
def __init__(self, vocab_map):
self.vocab_map = vocab_map
self.eol_token_id = vocab_map.get("<|extra_200|>")
self.image_token_id = vocab_map.get("<image>")
@cached_property
def image_tokens(self):
return sorted([val for name, val in self.vocab_map.items() if name.startswith("<|visual token")])
@cached_property
def image_tokens_str(self):
return sorted([name for name, val in self.vocab_map.items() if name.startswith("<|visual token")])
@cached_property
def img2bpe(self):
return {int(token[-8:-2]): self.vocab_map[token] for token in self.image_tokens_str}
@cached_property
def bpe2img(self):
return {v: k for k, v in self.img2bpe.items()}
@cached_property
def bpe2img_mapping_tensor(self):
mapping = torch.zeros(max(self.bpe2img.keys()) + 1, dtype=torch.int)
for k, v in self.bpe2img.items():
mapping[k] = v
return mapping
@cached_property
def img2bpe_mapping_tensor(self):
mapping = torch.zeros(max(self.img2bpe.keys()) + 1, dtype=torch.int)
for k, v in self.img2bpe.items():
mapping[k] = v
return mapping
def convert_img2bpe(self, img_batch: List[torch.Tensor]) -> torch.Tensor:
device = img_batch.device
eol_row = torch.ones((img_batch.shape[0], 1), dtype=torch.int) * self.eol_token_id
img_tokens = self.img2bpe_mapping_tensor[img_batch.to("cpu")]
img_tokens = torch.cat([img_tokens, eol_row], dim=-1)
return img_tokens.to(device)
def convert_bpe2img(self, img_batch: torch.Tensor) -> torch.Tensor:
device = img_batch.device
img_batch = img_batch[..., :-1] # remove last row of EOL tokens
img_tokens = self.bpe2img_mapping_tensor[img_batch.to("cpu")]
return img_tokens.to(device)
|
class_definition
| 31,476 | 33,455 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,487 |
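The `img2bpe` mapping above parses the six-digit index embedded in each `<|visual token NNNNNN|>` name via `token[-8:-2]`. A toy sketch with a hypothetical vocabulary fragment; the integer BPE ids below are made up for illustration, and only the `<|extra_200|>` and `<image>` names come from the class above:

```python
# Hypothetical vocabulary fragment; the integer ids are illustrative only.
vocab_map = {
    "<|visual token 000000|>": 170000,
    "<|visual token 000001|>": 170001,
    "<|extra_200|>": 151846,   # end-of-line token appended after every image row
    "<image>": 151851,         # placeholder image token
}

image_tokens_str = sorted(name for name in vocab_map if name.startswith("<|visual token"))
img2bpe = {int(token[-8:-2]): vocab_map[token] for token in image_tokens_str}
bpe2img = {v: k for k, v in img2bpe.items()}

print(img2bpe)   # {0: 170000, 1: 170001}
print(bpe2img)   # {170000: 0, 170001: 1}
```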
class Emu3PreTrainedModel(ChameleonPreTrainedModel, Emu3VQVAE):
_no_split_modules = [
"Emu3DecoderLayer",
]
_supports_flex_attn = True
def _init_weights(self, module):
std = self.config.get_text_config().initializer_range
if isinstance(module, Emu3VQVAE):
module.apply(module._init_weights)
elif isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
class_definition
| 33,458 | 34,212 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,488 |
class Emu3TextModel(LlamaModel, Emu3PreTrainedModel):
def __init__(self, config: Emu3Config):
super().__init__(config)
self.layers = nn.ModuleList(
[Emu3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
|
class_definition
| 43,704 | 43,980 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,489 |
class Emu3ForCausalLM(LlamaForCausalLM, Emu3PreTrainedModel, GenerationMixin):
config_class = Emu3TextConfig
def __init__(self, config):
super().__init__(config)
self.model = Emu3TextModel(config)
@add_start_docstrings_to_model_forward(EMU3_TEXT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class="Emu3TextConfig")
def forward(**super_kwargs):
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
num_logits_to_keep (`int`, *optional*):
Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
`input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
Returns:
Example:
```python
>>> from transformers import Emu3Processor, Emu3ForCausalLM
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForCausalLM.from_pretrained("BAAI/Emu3-Chat-hf", torch_dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> inputs = processor(text=["Can you write me a poem about winter."], return_tensors="pt").to(model.device)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```"""
super().forward()
|
class_definition
| 43,983 | 46,005 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,490 |
class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin):
_tied_weights_keys = ["text_model.lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.text_model = Emu3ForCausalLM._from_config(config.text_config)
self.vqmodel = Emu3VQVAE(config.vq_config)
self.vocabulary_mapping = Emu3ImageVocabularyMapping(config.vocabulary_map)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.text_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.text_model.set_input_embeddings(value)
def get_image_tokens(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):
"""
Tokenizes images into discrete tokens with VQGAN module. Converts
obtained image tokens into BPE tokens and wraps with "boi" and "eoi"
special tokens.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
The sizes of the images in the batch, being (height, width) for each image.
"""
image_tokens_list = self.vqmodel.encode(pixel_values, image_sizes)
bpe_tokens_list = [self.vocabulary_mapping.convert_img2bpe(tokens).flatten() for tokens in image_tokens_list]
bpe_tokens = torch.cat(bpe_tokens_list)
return bpe_tokens
@torch.no_grad
def decode_image_tokens(self, image_tokens: torch.LongTensor, height: int, width: int):
"""
Decodes generated image tokens from language model to continuous pixel values
with VQGAN module via upsampling.
Args:
image_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`):
The tensors corresponding to the input images.
height (`int`):
Height of the generated image before upsampling.
width (`int`):
Width of the generated image before upsampling.
"""
sequences = image_tokens[:, :-3].view(-1, height, width + 1)
image_tokens = self.vocabulary_mapping.convert_bpe2img(sequences)
image = self.vqmodel.decode(image_tokens)
return image
@add_start_docstrings_to_model_forward(EMU3_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
image_sizes: torch.Tensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
num_logits_to_keep: int = 0,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
num_logits_to_keep (`int`, *optional*):
Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
`input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
Returns:
Example:
```python
>>> from transformers import Emu3Processor, Emu3ForConditionalGeneration
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForConditionalGeneration.from_pretrained("BAAI/Emu3-Chat-hf", torch_dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> conversation = [
... {
... "role": "system",
... "content": [
... {"type": "text", "text": "You are a helpful assistant."},
... ],
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "Please describe the image."},
... ],
... },
... ]
>>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
>>> image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
>>> inputs = processor(images=[image], text=[prompt], return_tensors="pt").to(model.device, torch.bfloat16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
)
if pixel_values is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one"
)
if pixel_values is not None:
image_tokens = self.get_image_tokens(pixel_values, image_sizes)
special_image_mask = input_ids == self.vocabulary_mapping.image_token_id
image_tokens = image_tokens.to(input_ids.device, input_ids.dtype)
input_ids = input_ids.masked_scatter(special_image_mask, image_tokens)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
num_logits_to_keep=num_logits_to_keep,
)
return outputs
|
class_definition
| 46,008 | 53,429 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modular_emu3.py
| null | 3,491 |
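The key multimodal step in `forward` above is swapping the image placeholder tokens in `input_ids` for the BPE-converted VQ tokens via `masked_scatter`. A toy sketch with made-up ids (the real placeholder id is `vocabulary_mapping.image_token_id`):

```python
import torch

IMAGE_TOKEN_ID = 9  # hypothetical placeholder id inserted by the processor
input_ids = torch.tensor([[1, 9, 9, 9, 2]])   # text ids with three image placeholders
image_tokens = torch.tensor([100, 101, 102])  # flattened BPE ids from the VQ encoder

mask = input_ids == IMAGE_TOKEN_ID
input_ids = input_ids.masked_scatter(mask, image_tokens)
print(input_ids)  # tensor([[  1, 100, 101, 102,   2]])
```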
class Emu3RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Emu3RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
class_definition
| 2,429 | 3,147 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modeling_emu3.py
| null | 3,492 |
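`Emu3RMSNorm` rescales each hidden vector by the reciprocal root-mean-square of its elements (no mean subtraction, unlike LayerNorm), i.e. `x / sqrt(mean(x**2) + eps) * weight`. A quick numerical check of that formula, written as a free function rather than the class:

```python
import torch

def rms_norm(x, weight, eps=1e-6):
    # Compute in float32 for stability, then cast back to the input dtype.
    variance = x.float().pow(2).mean(-1, keepdim=True)
    return (weight * (x.float() * torch.rsqrt(variance + eps))).to(x.dtype)

x = torch.randn(2, 4, 8)
out = rms_norm(x, torch.ones(8))
# With unit weight, every vector now has (approximately) unit root-mean-square.
print(out.pow(2).mean(-1).sqrt())
```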
class Emu3MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
|
class_definition
| 3,150 | 3,847 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modeling_emu3.py
| null | 3,493 |
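`Emu3MLP` is the LLaMA-style gated feed-forward block: `down_proj(act(gate_proj(x)) * up_proj(x))`. A functional sketch with toy sizes, assuming a SiLU activation as in LLaMA-family configs:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden, intermediate = 8, 16  # toy sizes; the real ones come from the config
gate_proj = nn.Linear(hidden, intermediate, bias=False)
up_proj = nn.Linear(hidden, intermediate, bias=False)
down_proj = nn.Linear(intermediate, hidden, bias=False)

x = torch.randn(2, 5, hidden)
out = down_proj(F.silu(gate_proj(x)) * up_proj(x))  # SiLU-gated "up" branch, projected back down
print(out.shape)  # torch.Size([2, 5, 8])
```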
class Emu3Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
logger.warning_once(
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
else:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
class_definition
| 7,126 | 10,691 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modeling_emu3.py
| null | 3,494 |
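When `num_key_value_heads < num_attention_heads`, the projections above produce fewer key/value heads than query heads (grouped-query attention), and the attention backend repeats each KV head across its group. A shape-only sketch with toy dimensions:

```python
import torch

batch, seq, head_dim = 2, 5, 4
num_attention_heads, num_key_value_heads = 8, 2   # four query heads share each KV head
num_key_value_groups = num_attention_heads // num_key_value_heads

q = torch.randn(batch, num_attention_heads, seq, head_dim)
k = torch.randn(batch, num_key_value_heads, seq, head_dim)

# Repeat along the head dim so every query head has a matching key head.
k_rep = k.repeat_interleave(num_key_value_groups, dim=1)
scores = q @ k_rep.transpose(-1, -2) * head_dim**-0.5
print(k_rep.shape, scores.shape)  # torch.Size([2, 8, 5, 4]) torch.Size([2, 8, 5, 5])
```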
class Emu3DecoderLayer(nn.Module):
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Emu3Attention(config=config, layer_idx=layer_idx)
self.mlp = Emu3MLP(config)
self.input_layernorm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.dropout = nn.Dropout(config.attention_dropout)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
into the model
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + self.dropout(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
|
class_definition
| 10,694 | 14,081 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modeling_emu3.py
| null | 3,495 |
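The decoder layer applies the standard pre-norm residual pattern twice: normalize, run the sub-block (attention, then MLP), apply dropout, add the residual back. A minimal generic sketch of that wiring, using `LayerNorm` as a stand-in for the model's RMSNorm:

```python
import torch
import torch.nn as nn

class PreNormResidual(nn.Module):
    """Minimal pre-norm residual wrapper: x + dropout(block(norm(x)))."""
    def __init__(self, dim, block, dropout=0.1):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.block = block
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return x + self.dropout(self.block(self.norm(x)))

layer = PreNormResidual(8, nn.Linear(8, 8))
print(layer(torch.randn(2, 5, 8)).shape)  # torch.Size([2, 5, 8])
```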
class Emu3VQVAEVectorQuantizer(nn.Module):
"""
A module for vector quantization using learned embedding vectors.
    This module implements the quantization process similar to the one described in
the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
input vectors into discrete codebook vectors, which are learned during training.
Current implementation improves over previous ones by avoiding costly matrix multiplications
and allowing for post-hoc remapping of indices.
"""
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
self.embedding = nn.Embedding(config.codebook_size, config.embed_dim)
self.embedding.weight.data.uniform_(-1.0 / config.codebook_size, 1.0 / config.codebook_size)
def forward(self, hidden_state: torch.Tensor):
batch_size, temporal, channels, height, width = hidden_state.shape
hidden_state = hidden_state.permute(0, 1, 3, 4, 2).contiguous()
hidden_state_flattened = hidden_state.view(-1, channels)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
hidden_state_sum = torch.sum(hidden_state_flattened**2, dim=1, keepdim=True)
embedding_sum = torch.sum(self.embedding.weight**2, dim=1)
# "bd,dn->bn",
distances = 2 * torch.matmul(hidden_state_flattened, self.embedding.weight.transpose(0, 1))
distances = hidden_state_sum + embedding_sum - distances
min_encoding_indices = torch.argmin(distances, dim=1)
min_encoding_indices = min_encoding_indices.view(batch_size, temporal, height, width)
return min_encoding_indices
|
class_definition
| 14,084 | 15,751 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modeling_emu3.py
| null | 3,496 |
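The quantizer avoids materializing the full pairwise `(z - e)^2` tensor by expanding it as `z^2 + e^2 - 2 * z @ e^T`. A small check that the expansion selects the same nearest codebook entry as the naive distance:

```python
import torch

codebook = torch.randn(16, 8)   # 16 codebook embeddings of dim 8
z = torch.randn(5, 8)           # 5 flattened hidden vectors

# Expanded form, as used by the module above.
d_expanded = z.pow(2).sum(1, keepdim=True) + codebook.pow(2).sum(1) - 2 * z @ codebook.T
# Naive pairwise squared distance for reference.
d_naive = ((z[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)

assert torch.allclose(d_expanded, d_naive, atol=1e-5)
print(torch.equal(d_expanded.argmin(1), d_naive.argmin(1)))  # True (barring exact ties)
```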
class Emu3VQVAEEncoderConvDownsample(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
def forward(self, hidden_states):
# no asymmetric padding in torch conv, must do it ourselves
hidden_states = F.pad(hidden_states, pad=(0, 1, 0, 1), mode="constant", value=0)
hidden_states = self.conv(hidden_states)
return hidden_states
|
class_definition
| 15,754 | 16,232 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modeling_emu3.py
| null | 3,497 |
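The downsample block pads only the right and bottom edges by one pixel before a stride-2, kernel-3 convolution, so the spatial size halves exactly. A quick shape check with a toy channel count:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(4, 4, kernel_size=3, stride=2, padding=0)
x = torch.randn(1, 4, 32, 32)
x = F.pad(x, pad=(0, 1, 0, 1))  # asymmetric pad: right and bottom only
print(conv(x).shape)            # torch.Size([1, 4, 16, 16])
```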
class Emu3VQVAEEncoderConvUpsample(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
def forward(self, hidden_states):
hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
hidden_states = self.conv(hidden_states)
return hidden_states
|
class_definition
| 16,235 | 16,641 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modeling_emu3.py
| null | 3,498 |
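The upsample block doubles the spatial resolution with nearest-neighbor interpolation and then smooths the result with a 3x3 convolution. The interpolation step on its own:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 4, 16, 16)
up = F.interpolate(x, scale_factor=2.0, mode="nearest")
print(up.shape)  # torch.Size([1, 4, 32, 32])
```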
class Emu3VQVAEConv3d(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel_size: Tuple[int],
stride: Tuple[int],
):
super().__init__()
padding_sizes = [one_kernel - one_stride for one_kernel, one_stride in zip(kernel_size[1:], stride[1:])]
self.padding = ()
for pad_size in padding_sizes[::-1]:
self.padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
self.padding += (2, 0)
self.conv = nn.Conv3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
)
def forward(self, hidden_states: torch.Tensor):
hidden_states = F.pad(hidden_states, self.padding)
hidden_states = self.conv(hidden_states)
return hidden_states
|
class_definition
| 16,644 | 17,478 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/emu3/modeling_emu3.py
| null | 3,499 |
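`F.pad` expects padding pairs for the last dimension first, which is why the constructor iterates `padding_sizes[::-1]` and appends the temporal pair `(2, 0)` (pad before, none after) last. A worked example of the resulting padding tuple and padded shape for kernel `(3, 3, 3)` and stride `(1, 1, 1)`, mirroring the module's hardcoded temporal value:

```python
import torch
import torch.nn.functional as F

kernel_size, stride = (3, 3, 3), (1, 1, 1)
# Spatial dims: pad kernel - stride pixels, split (ceil, floor) across the two sides.
padding_sizes = [k - s for k, s in zip(kernel_size[1:], stride[1:])]
padding = ()
for pad_size in padding_sizes[::-1]:
    padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
padding += (2, 0)  # temporal dim: two frames before, none after
print(padding)  # (1, 1, 1, 1, 2, 0)

x = torch.randn(1, 4, 5, 8, 8)    # (batch, channels, time, height, width)
print(F.pad(x, padding).shape)    # torch.Size([1, 4, 7, 10, 10])
```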