text (string, lengths 31-243k) | type (string, 1 class) | start (int64, 36-275k) | end (int64, 286-280k) | depth (int64, 0-1) | filepath (string, lengths 85-188) | parent_class (string, 3 values) | class_index (int64, 0-10.8k)
---|---|---|---|---|---|---|---|
class OriginalOneFormerConfigToProcessorConverter:
def __call__(self, original_config: object, model_repo: str) -> OneFormerProcessor:
model = original_config.MODEL
model_input = original_config.INPUT
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
if "ade20k" in model_repo:
class_info_file = "ade20k_panoptic.json"
elif "coco" in model_repo:
class_info_file = "coco_panoptic.json"
elif "cityscapes" in model_repo:
class_info_file = "cityscapes_panoptic.json"
else:
raise ValueError("Invalid Dataset!")
image_processor = OneFormerImageProcessor(
image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(),
image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(),
size=model_input.MIN_SIZE_TEST,
max_size=model_input.MAX_SIZE_TEST,
num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
ignore_index=dataset_catalog.ignore_label,
class_info_file=class_info_file,
)
tokenizer = CLIPTokenizer.from_pretrained(model_repo)
return OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
task_seq_length=original_config.INPUT.TASK_SEQ_LEN,
max_seq_length=original_config.INPUT.MAX_SEQ_LEN,
)
| class_definition | 7,730 | 9,139 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/convert_to_hf_oneformer.py | null | 5,100 |
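For reference, a converted `OneFormerProcessor` is used like any published OneFormer processor. A minimal usage sketch, independent of the conversion script above; the checkpoint name and image URL are just examples of a released repo and a public test image:

```python
import requests
from PIL import Image
from transformers import OneFormerProcessor

processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# the processor resizes/normalizes the image and tokenizes the task prompt in one call
inputs = processor(images=image, task_inputs=["semantic"], return_tensors="pt")
print(list(inputs.keys()))  # e.g. ['pixel_values', 'task_inputs', ...]
```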
class OriginalOneFormerCheckpointToOursConverter:
def __init__(self, original_model: nn.Module, config: OneFormerConfig):
self.original_model = original_model
self.config = config
def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
for src_key, dst_key in renamed_keys:
dst_state_dict[dst_key] = src_state_dict.pop(src_key)
# Swin Backbone
def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
dst_prefix: str = "pixel_level_module.encoder"
src_prefix: str = "backbone"
renamed_keys = [
(
f"{src_prefix}.patch_embed.proj.weight",
f"{dst_prefix}.embeddings.patch_embeddings.projection.weight",
),
(f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"),
(f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.embeddings.norm.weight"),
(f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"),
]
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend(
[ # src, dst
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
),
]
)
# now we need to handle the attentions
# read in weights + bias of the fused qkv input projection layer of self-attention
src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
] = src_att_weight[:offset, :]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
] = src_att_bias[:offset]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
] = src_att_weight[offset : offset * 2, :]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
] = src_att_bias[offset : offset * 2]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
] = src_att_weight[-offset:, :]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
] = src_att_bias[-offset:]
# let's pop them
src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
# proj
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
),
]
)
# second norm
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
),
]
)
# mlp
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
),
]
)
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
)
]
)
if layer_idx < num_layers - 1:
# patch merging
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias",
),
]
)
# hidden states norms
renamed_keys.extend(
[
(
f"{src_prefix}.norm{layer_idx}.weight",
f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight",
),
(
f"{src_prefix}.norm{layer_idx}.bias",
f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias",
),
]
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
# Dinat Backbone
def replace_dinat_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
dst_prefix: str = "pixel_level_module.encoder"
src_prefix: str = "backbone"
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
renamed_keys = rename_keys_for_weight_bias(f"{src_prefix}.patch_embed.norm", f"{dst_prefix}.embeddings.norm")
for i in range(2):
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.patch_embed.proj.{i}",
f"{dst_prefix}.embeddings.patch_embeddings.projection.{i}",
)
)
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm1",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_before",
)
)
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm2",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_after",
)
)
renamed_keys.extend(
[ # src, dst
(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.rpb",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.rpb",
),
]
)
# now we need to handle the attentions
# read in weights + bias of the fused qkv input projection layer of self-attention
src_att_weight = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
src_att_bias = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.weight"
] = src_att_weight[:offset, :]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.bias"
] = src_att_bias[:offset]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.weight"
] = src_att_weight[offset : offset * 2, :]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.bias"
] = src_att_bias[offset : offset * 2]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.weight"
] = src_att_weight[-offset:, :]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.bias"
] = src_att_bias[-offset:]
# let's pop them
src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
# proj
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.proj",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.output.dense",
)
)
# mlp
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc1",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.intermediate.dense",
)
)
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc2",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.output.dense",
)
)
if layer_idx < num_layers - 1:
# patch merging
renamed_keys.extend(
[
(
f"{src_prefix}.levels.{layer_idx}.downsample.reduction.weight",
f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.reduction.weight",
),
(
f"{src_prefix}.levels.{layer_idx}.downsample.norm.weight",
f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.weight",
),
(
f"{src_prefix}.levels.{layer_idx}.downsample.norm.bias",
f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.bias",
),
]
)
# hidden states norms
renamed_keys.extend(
[
(
f"{src_prefix}.norm{layer_idx}.weight",
f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight",
),
(
f"{src_prefix}.norm{layer_idx}.bias",
f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias",
),
]
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
# Backbone + Pixel Decoder
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict, is_swin: bool):
dst_prefix: str = "pixel_level_module.decoder"
src_prefix: str = "sem_seg_head.pixel_decoder"
if is_swin:
self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config)
else:
self.replace_dinat_backbone(dst_state_dict, src_state_dict, self.config)
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
self_attn_keys = []
self_attn_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights")
)
self_attn_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj")
)
self_attn_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets")
)
self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj"))
return self_attn_keys
def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
encoder_keys = []
encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1"))
encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2"))
encoder_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm")
)
encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm"))
encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn"))
return encoder_keys
# convolution layer for final features
renamed_keys = [
(f"{src_prefix}.adapter_1.weight", f"{dst_prefix}.adapter_1.0.weight"),
(f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"),
(f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"),
]
renamed_keys.extend(
[
(f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"),
(f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"),
(f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"),
]
)
# proj layers
for i in range(3):
for j in range(2):
renamed_keys.extend(
[
(f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"),
(f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"),
]
)
renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")])
# layers
for layer_idx in range(self.config.encoder_layers):
renamed_keys.extend(
rename_keys_for_encoder_layer(
f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}"
)
)
# proj
renamed_keys.extend(
[
(f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"),
(f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"),
]
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
# Transformer Decoder
def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "transformer_module.decoder.layers"
src_prefix: str = "sem_seg_head.predictor"
for i in range(self.config.decoder_layers - 1):
# read in weights + bias of input projection layer of self-attention
in_proj_weight = src_state_dict.pop(
f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight"
)
in_proj_bias = src_state_dict.pop(
f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias"
)
# next, add query, keys and values (in that order) to the state dict
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.bias"] = in_proj_bias[:256]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.bias"] = in_proj_bias[256:512]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "transformer_module"
src_prefix: str = "sem_seg_head.predictor"
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
attn_keys = [
(f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"),
(f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"),
]
attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))
return attn_keys
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
attn_keys = []
attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))
return attn_keys
def rename_keys_for_query_transformer_layer(src_prefix: str, dst_prefix: str):
query_transformer_layer_keys = []
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1")
)
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2")
)
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.norm1")
)
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.norm2")
)
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.norm3", f"{dst_prefix}.norm3")
)
query_transformer_layer_keys.extend(
rename_keys_for_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")
)
query_transformer_layer_keys.extend(
rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn")
)
return query_transformer_layer_keys
def rename_keys_for_cross_attn_layer(src_prefix: str, dst_prefix: str):
cross_attn_layer_keys = []
cross_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
cross_attn_layer_keys.extend(
rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn")
)
return cross_attn_layer_keys
def rename_keys_for_self_attn_layer(src_prefix: str, dst_prefix: str):
self_attn_layer_keys = []
self_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
self_attn_layer_keys.extend(
rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")
)
return self_attn_layer_keys
def rename_keys_for_ffn_layer(src_prefix: str, dst_prefix: str):
ffn_layer_keys = []
ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1"))
ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2"))
ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
return ffn_layer_keys
def rename_keys_for_transformer_decoder_layer(src_prefix: str, dst_prefix: str, idx: int):
transformer_decoder_layer_keys = []
transformer_decoder_layer_keys.extend(
rename_keys_for_cross_attn_layer(
f"{src_prefix}.transformer_cross_attention_layers.{idx}", f"{dst_prefix}.{idx}.cross_attn"
)
)
transformer_decoder_layer_keys.extend(
rename_keys_for_self_attn_layer(
f"{src_prefix}.transformer_self_attention_layers.{idx}", f"{dst_prefix}.{idx}.self_attn"
)
)
transformer_decoder_layer_keys.extend(
rename_keys_for_ffn_layer(f"{src_prefix}.transformer_ffn_layers.{idx}", f"{dst_prefix}.{idx}.ffn")
)
return transformer_decoder_layer_keys
# positional embedding for object queries
renamed_keys = [
(f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"),
(f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"),
]
# norm
renamed_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.decoder_norm", f"{dst_prefix}.decoder.decoder_norm")
)
# proj
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.class_input_proj", f"{dst_prefix}.decoder.query_input_projection"
)
)
renamed_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.class_embed", f"{dst_prefix}.decoder.class_embed")
)
for i in range(3):
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.mask_embed.layers.{i}", f"{dst_prefix}.decoder.mask_embed.layers.{i}.0"
)
)
# norm
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.class_transformer.decoder.norm", f"{dst_prefix}.decoder.query_transformer.decoder.norm"
)
)
# transformer to update queries with task tokens
for i in range(self.config.query_dec_layers):
renamed_keys.extend(
rename_keys_for_query_transformer_layer(
f"{src_prefix}.class_transformer.decoder.layers.{i}",
f"{dst_prefix}.decoder.query_transformer.decoder.layers.{i}",
)
)
# decoder layers
for i in range(self.config.decoder_layers - 1):
renamed_keys.extend(
rename_keys_for_transformer_decoder_layer(
f"{src_prefix}",
f"{dst_prefix}.decoder.layers",
i,
)
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict)
def replace_task_mlp(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "task_encoder"
src_prefix: str = "task_mlp"
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
renamed_keys = []
for i in range(2):
renamed_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.task_mlp.layers.{i}.0")
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_text_projector(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "text_mapper.text_projector"
src_prefix: str = "text_projector"
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
renamed_keys = []
for i in range(self.config.text_encoder_config["text_encoder_proj_layers"]):
renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.{i}.0"))
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_text_mapper(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "text_mapper.text_encoder"
src_prefix: str = "text_encoder"
self.replace_text_projector(dst_state_dict, src_state_dict)
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
attn_keys = [
(f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"),
(f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"),
]
attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))
return attn_keys
def rename_keys_for_layer(src_prefix: str, dst_prefix: str):
resblock_keys = []
resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_fc", f"{dst_prefix}.mlp.fc1"))
resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_proj", f"{dst_prefix}.mlp.fc2"))
resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_1", f"{dst_prefix}.layer_norm1"))
resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_2", f"{dst_prefix}.layer_norm2"))
resblock_keys.extend(rename_keys_for_attn(f"{src_prefix}.attn", f"{dst_prefix}.self_attn"))
return resblock_keys
renamed_keys = [
("prompt_ctx.weight", "text_mapper.prompt_ctx.weight"),
]
renamed_keys.extend(
[
(f"{src_prefix}.positional_embedding", f"{dst_prefix}.positional_embedding"),
(f"{src_prefix}.token_embedding.weight", f"{dst_prefix}.token_embedding.weight"),
]
)
renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_final", f"{dst_prefix}.ln_final"))
for i in range(self.config.text_encoder_config["text_encoder_num_layers"]):
renamed_keys.extend(
rename_keys_for_layer(
f"{src_prefix}.transformer.resblocks.{i}", f"{dst_prefix}.transformer.layers.{i}"
)
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def convert(self, oneformer: OneFormerModel, is_swin: bool) -> OneFormerModel:
dst_state_dict = TrackedStateDict(oneformer.state_dict())
src_state_dict = self.original_model.state_dict()
self.replace_pixel_module(dst_state_dict, src_state_dict, is_swin)
self.replace_transformer_module(dst_state_dict, src_state_dict)
self.replace_task_mlp(dst_state_dict, src_state_dict)
if self.config.is_training:
self.replace_text_mapper(dst_state_dict, src_state_dict)
logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}")
logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}")
logger.info("🙌 Done")
oneformer.load_state_dict(dst_state_dict)
return oneformer
@staticmethod
def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]:
checkpoints: List[Path] = checkpoints_dir.glob("**/*.pth")
for checkpoint in checkpoints:
logger.info(f"💪 Converting {checkpoint.stem}")
# find associated config file
config: Path = config_dir / f"{checkpoint.stem}.yaml"
yield config, checkpoint
| class_definition | 9,142 | 41,080 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/convert_to_hf_oneformer.py | null | 5,101 |
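The backbone renaming above repeatedly slices a fused `qkv` projection into separate query/key/value tensors. A self-contained sketch of that slicing pattern on dummy tensors (the hidden size is arbitrary):

```python
import torch

hidden_dim = 96
qkv_weight = torch.randn(3 * hidden_dim, hidden_dim)  # fused [q; k; v] projection weight
qkv_bias = torch.randn(3 * hidden_dim)

offset = qkv_weight.shape[0] // 3
query_weight = qkv_weight[:offset, :]
key_weight = qkv_weight[offset : offset * 2, :]
value_weight = qkv_weight[-offset:, :]
query_bias, key_bias, value_bias = qkv_bias[:offset], qkv_bias[offset : offset * 2], qkv_bias[-offset:]

# sanity check: the three slices together reconstruct the fused projection
assert torch.equal(torch.cat([query_weight, key_weight, value_weight], dim=0), qkv_weight)
```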
class OneFormerHungarianMatcher(nn.Module):
def __init__(
self, cost_class: float = 1.0, cost_mask: float = 1.0, cost_dice: float = 1.0, num_points: int = 12544
):
"""This class computes an assignment between the labels and the predictions of the network.
For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
Params:
cost_class (float, *optional*, defaults to 1.0):
This is the relative weight of the classification error in the matching cost.
cost_mask (float, *optional*, defaults to 1.0):
This is the relative weight of the sigmoid ce loss of the binary mask in the matching cost.
cost_dice (float, *optional*, defaults to 1.0):
This is the relative weight of the dice loss of the binary mask in the matching cost
num_points (int, *optional*, defaults to 12544):
Number of points to be sampled for dice and mask loss matching cost.
"""
super().__init__()
if cost_class == 0 and cost_mask == 0 and cost_dice == 0:
raise ValueError("All costs cant be 0")
self.cost_class = cost_class
self.cost_mask = cost_mask
self.cost_dice = cost_dice
self.num_points = num_points
@torch.no_grad()
def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]:
"""Performs the matching
Params:
masks_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, height, width` with the
predicted masks.
class_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, num_labels` with the
classification logits.
class_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes` (where num_target_boxes is the number
of ground-truth objects in the target) containing the class labels.
mask_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes, height, width` containing the target
masks.
Returns:
`List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected labels (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_targets).
"""
indices: List[Tuple[np.array]] = []
num_queries = class_queries_logits.shape[1]
preds_masks = masks_queries_logits
preds_probs = class_queries_logits
# iterate through batch size
for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):
pred_probs = pred_probs.softmax(-1)
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it by 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, so it can be omitted.
cost_class = -pred_probs[:, labels]
pred_mask = pred_mask[:, None]
target_mask = target_mask[:, None].to(pred_mask.device)
# all masks share the same set of points for efficient matching!
point_coords = torch.rand(1, self.num_points, 2, device=pred_mask.device)
# get ground truth labels
target_mask = sample_point(
target_mask,
point_coords.repeat(target_mask.shape[0], 1, 1),
align_corners=False,
).squeeze(1)
pred_mask = sample_point(
pred_mask,
point_coords.repeat(pred_mask.shape[0], 1, 1),
align_corners=False,
).squeeze(1)
with autocast(enabled=False):
pred_mask = pred_mask.float()
target_mask = target_mask.float()
# compute the sigmoid ce loss
cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask)
# Compute the dice loss
cost_dice = pair_wise_dice_loss(pred_mask, target_mask)
# final cost matrix
cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice
cost_matrix = cost_matrix.reshape(num_queries, -1).cpu()
# do the assignment using the Hungarian algorithm from scipy
assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())
indices.append(assigned_indices)
# It could be stacked in one tensor
matched_indices = [
(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices
]
return matched_indices
| class_definition | 9,679 | 14,963 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py | null | 5,102 |
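The final assignment step in `forward` is delegated to `scipy.optimize.linear_sum_assignment`. A toy example with a hand-written cost matrix (the values are made up) shows the shape of the result:

```python
import torch
from scipy.optimize import linear_sum_assignment

num_queries, num_targets = 4, 2
cost_matrix = torch.tensor(
    [[0.9, 0.1],   # query 0 is cheap to match with target 1
     [0.2, 0.8],   # query 1 is cheap to match with target 0
     [0.5, 0.5],
     [0.7, 0.6]]
)
pred_indices, target_indices = linear_sum_assignment(cost_matrix.numpy())

# len(pred_indices) == len(target_indices) == min(num_queries, num_targets);
# unmatched queries are later treated as "no object"
print(pred_indices, target_indices)  # [0 1] [1 0]
```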
class OneFormerLoss(nn.Module):
def __init__(
self,
num_classes: int,
matcher: OneFormerHungarianMatcher,
weight_dict: Dict[str, float],
eos_coef: float,
num_points: int,
oversample_ratio: float,
importance_sample_ratio: float,
contrastive_temperature: float = None,
):
"""
This class computes the losses using the class predictions, mask predictions and the contrastive queries.
OneFormer calculates the classification CE loss on the class predictions. Mask predictions are used for
calculating the binary CE loss and dice loss. The contrastive queries are used for calculating the contrastive
loss.
Args:
num_classes (`int`):
The number of classes.
matcher (`OneFormerHungarianMatcher`):
A torch module that computes the assignments between the predictions and labels.
weight_dict (`Dict[str, float]`):
A dictionary of weights to be applied to the different losses.
eos_coef (`float`):
Weight to apply to the null class.
num_points (`int`):
Number of points to be sampled for dice and mask loss calculations.
oversample_ratio (`float`):
Required for pointwise loss calculation.
importance_sample_ratio (`float`):
Required for pointwise loss calculation.
contrastive_temperature (`float`):
Temperature for scaling the contrastive logits.
"""
requires_backends(self, ["scipy"])
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
self.contrastive_temperature = contrastive_temperature
if self.contrastive_temperature is not None:
self.logit_scale = nn.Parameter(torch.tensor(np.log(1 / contrastive_temperature)))
def _max_by_axis(self, the_list: List[List[int]]) -> List[int]:
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def _pad_images_to_max_in_batch(self, tensors: List[Tensor]) -> Tuple[Tensor, Tensor]:
# get the maximum size in the batch
max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors])
batch_size = len(tensors)
# compute final size
batch_shape = [batch_size] + max_size
b, _, h, w = batch_shape
# get metadata
dtype = tensors[0].dtype
device = tensors[0].device
padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device)
padding_masks = torch.ones((b, h, w), dtype=torch.bool, device=device)
# pad the tensors to the size of the biggest one
for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks):
padded_tensor[: tensor.shape[0], : tensor.shape[1], : tensor.shape[2]].copy_(tensor)
padding_mask[: tensor.shape[1], : tensor.shape[2]] = False
return padded_tensors, padding_masks
def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor):
"""Compute the query-text contrastive loss.
Args:
contrastive_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
text_queries (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
Returns:
`Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries
and text queries derived from input text list.
"""
image_queries = contrastive_queries_logits.float()
# [batch_size, num_queries * hidden_dim] after flattening
image_queries = nn.functional.normalize(image_queries.flatten(1), dim=-1)
text_queries = nn.functional.normalize(text_queries.flatten(1), dim=-1)
logit_scale = torch.clamp(self.logit_scale.exp(), max=100)
logits_per_text = torch.matmul(text_queries, image_queries.t()) * logit_scale
logits_per_img = logits_per_text.t()
loss_img = nn.functional.cross_entropy(
logits_per_img, torch.arange(len(logits_per_img), device=logits_per_text.device)
)
loss_text = nn.functional.cross_entropy(
logits_per_text, torch.arange(len(logits_per_text), device=logits_per_text.device)
)
loss_contrastive = loss_img + loss_text
losses = {"loss_contrastive": loss_contrastive}
return losses
def loss_labels(
self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array]
) -> Dict[str, Tensor]:
"""Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`List[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`Tuple[np.array])`:
The indices computed by the Hungarian matcher.
Returns:
`Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
"""
pred_logits = class_queries_logits
batch_size, num_queries, _ = pred_logits.shape
criterion = nn.CrossEntropyLoss(weight=self.empty_weight)
idx = self._get_predictions_permutation_indices(indices)
# shape = (batch_size, num_queries)
target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])
# shape = (batch_size, num_queries)
target_classes = torch.full(
(batch_size, num_queries), fill_value=self.num_classes, dtype=torch.int64, device=pred_logits.device
)
target_classes[idx] = target_classes_o
# permute pred_logits (batch_size, num_queries, num_labels) -> (batch_size, num_labels, num_queries)
pred_logits_transposed = pred_logits.transpose(1, 2)
loss_ce = criterion(pred_logits_transposed, target_classes)
losses = {"loss_cross_entropy": loss_ce}
return losses
def loss_masks(
self, masks_queries_logits: Tensor, mask_labels: List[Tensor], indices: Tuple[np.array], num_masks: int
) -> Dict[str, Tensor]:
"""Compute the losses related to the masks using focal and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`Tuple[np.array])`:
The indices computed by the Hungarian matcher.
num_masks (`int)`:
The number of masks, used for normalization.
Returns:
`Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
"""
src_idx = self._get_predictions_permutation_indices(indices)
tgt_idx = self._get_targets_permutation_indices(indices)
# shape (batch_size * num_queries, height, width)
pred_masks = masks_queries_logits[src_idx]
# shape (batch_size, num_queries, height, width)
# pad all and stack the targets to the num_labels dimension
# upsample predictions to the target size, we have to add one dim to use interpolate
target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)
target_masks = target_masks[tgt_idx]
pred_masks = pred_masks[:, None]
target_masks = target_masks[:, None]
with torch.no_grad():
# sample point_coords
point_coords = self.sample_points_using_uncertainty(
pred_masks,
self.calculate_uncertainty,
self.num_points,
self.oversample_ratio,
self.importance_sample_ratio,
)
# get ground-truth labels
point_labels = sample_point(target_masks, point_coords, align_corners=False).squeeze(1)
point_logits = sample_point(pred_masks, point_coords, align_corners=False).squeeze(1)
losses = {
"loss_mask": sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks),
"loss_dice": dice_loss(point_logits, point_labels, num_masks),
}
del pred_masks
del target_masks
return losses
# Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss.calculate_uncertainty
def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor:
"""
In the Mask2Former paper, uncertainty is estimated as the L1 distance between 0.0 and the logit prediction in 'logits'
for the foreground class in `classes`.
Args:
logits (`torch.Tensor`):
A tensor of shape (R, 1, ...) for class-specific or class-agnostic predictions, where R is the total number
of predicted masks in all images. The values are logits.
Returns:
scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most
uncertain locations having the highest uncertainty score.
"""
uncertainty_scores = -(torch.abs(logits))
return uncertainty_scores
# Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss.sample_points_using_uncertainty
def sample_points_using_uncertainty(
self,
logits: torch.Tensor,
uncertainty_function,
num_points: int,
oversample_ratio: int,
importance_sample_ratio: float,
) -> torch.Tensor:
"""
This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The
uncertainty is calculated for each point using the passed `uncertainty_function`, which takes the points' logit
predictions as input.
Args:
logits (`torch.Tensor`):
Logit predictions for P points.
uncertainty_function:
A function that takes logit predictions for P points and returns their uncertainties.
num_points (`int`):
The number of points P to sample.
oversample_ratio (`int`):
Oversampling parameter.
importance_sample_ratio (`float`):
Ratio of points that are sampled via importance sampling.
Returns:
point_coordinates (`torch.Tensor`):
Coordinates for P sampled points.
"""
num_boxes = logits.shape[0]
num_points_sampled = int(num_points * oversample_ratio)
# Get random point coordinates
point_coordinates = torch.rand(num_boxes, num_points_sampled, 2, device=logits.device)
# Get sampled prediction value for the point coordinates
point_logits = sample_point(logits, point_coordinates, align_corners=False)
# Calculate the uncertainties based on the sampled prediction values of the points
point_uncertainties = uncertainty_function(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_points_sampled * torch.arange(num_boxes, dtype=torch.long, device=logits.device)
idx += shift[:, None]
point_coordinates = point_coordinates.view(-1, 2)[idx.view(-1), :].view(num_boxes, num_uncertain_points, 2)
if num_random_points > 0:
point_coordinates = torch.cat(
[point_coordinates, torch.rand(num_boxes, num_random_points, 2, device=logits.device)],
dim=1,
)
return point_coordinates
def _get_predictions_permutation_indices(self, indices):
# permute predictions following indices
batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
predictions_indices = torch.cat([src for (src, _) in indices])
return batch_indices, predictions_indices
def _get_targets_permutation_indices(self, indices):
# permute labels following indices
batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
target_indices = torch.cat([tgt for (_, tgt) in indices])
return batch_indices, target_indices
def forward(
self,
masks_queries_logits: Tensor,
class_queries_logits: Tensor,
contrastive_queries_logits: Tensor,
mask_labels: List[Tensor],
class_labels: List[Tensor],
text_queries: Tensor,
auxiliary_predictions: Optional[Dict[str, Tensor]] = None,
calculate_contrastive_loss: bool = True,
) -> Dict[str, Tensor]:
"""
This performs the loss computation.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
contrastive_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
class_labels (`List[torch.Tensor]`):
List of class labels of shape `(labels)`.
text_queries (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
auxiliary_predictions (`Dict[str, torch.Tensor]`, *optional*):
if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], then it contains the logits from the
inner layers of the Detr's Decoder.
calculate_contrastive_loss (`bool`, *optional*, defaults to `True`):
Whether or not to calculate the contrastive loss.
Returns:
`Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
- **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
- **loss_contrastive** -- The query-text contrastive loss computed using object and text queries.
if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], the dictionary contains additional losses
for each auxiliary prediction.
"""
# retrieve the matching between the outputs of the last layer and the labels
indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
# compute the average number of target masks for normalization purposes
num_masks = self.get_num_masks(class_labels, device=class_labels[0].device)
# get all the losses
losses: Dict[str, Tensor] = {
**self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks),
**self.loss_labels(class_queries_logits, class_labels, indices),
}
if calculate_contrastive_loss:
losses = {**losses, **self.loss_contrastive(contrastive_queries_logits, text_queries)}
# in case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if auxiliary_predictions is not None:
for idx, aux_outputs in enumerate(auxiliary_predictions):
masks_queries_logits = aux_outputs["masks_queries_logits"]
class_queries_logits = aux_outputs["class_queries_logits"]
loss_dict = self.forward(
masks_queries_logits,
class_queries_logits,
None,
mask_labels,
class_labels,
None,
calculate_contrastive_loss=False,
)
loss_dict = {f"{key}_{idx}": value for key, value in loss_dict.items()}
losses.update(loss_dict)
return losses
def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
"""
Computes the average number of target masks across the batch, for normalization purposes.
"""
num_masks = sum([len(classes) for classes in class_labels])
num_masks = torch.as_tensor([num_masks], dtype=torch.float, device=device)
world_size = 1
if is_accelerate_available():
if PartialState._shared_state != {}:
num_masks = reduce(num_masks)
world_size = PartialState().num_processes
num_masks = torch.clamp(num_masks / world_size, min=1)
return num_masks
| class_definition | 14,966 | 33,133 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py | null | 5,103 |
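To make the contrastive term concrete, here is a self-contained sketch that reproduces the math of `loss_contrastive` on random tensors; the temperature of 0.07 is only an illustrative value, the real module reads it from its config and stores the scale as a learnable parameter:

```python
import torch
import torch.nn.functional as F

batch_size, num_queries, hidden_dim = 2, 4, 8
contrastive_queries_logits = torch.randn(batch_size, num_queries, hidden_dim)
text_queries = torch.randn(batch_size, num_queries, hidden_dim)

temperature = 0.07  # illustrative; equivalent to clamping exp(log(1 / temperature)) as in the module
logit_scale = torch.clamp(torch.tensor(1.0 / temperature), max=100)

# flatten each sample's queries and L2-normalize, as in loss_contrastive
image_feats = F.normalize(contrastive_queries_logits.flatten(1), dim=-1)  # (batch_size, num_queries * hidden_dim)
text_feats = F.normalize(text_queries.flatten(1), dim=-1)

logits_per_text = text_feats @ image_feats.t() * logit_scale  # (batch_size, batch_size)
logits_per_image = logits_per_text.t()
targets = torch.arange(batch_size)

# symmetric cross entropy: each image should match its own text queries and vice versa
loss_contrastive = F.cross_entropy(logits_per_image, targets) + F.cross_entropy(logits_per_text, targets)
```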
class OneFormerTransformerDecoderOutput(BaseModelOutput):
"""
Base class for outputs of the Transformer decoder. This class adds attributes for class predictions, mask
predictions and contrastive logits to BaseModelOutputWithCrossAttentions.
Args:
object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Queries representation for the region proposals.
contrastive_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Queries representation for the contrastive loss.
prediction_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
Mask predictions from last layer of the transformer decoder.
prediction_class (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
Class predictions from last layer of the transformer decoder.
auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*):
Tuple of class and mask predictions from each layer of the transformer decoder.
"""
object_queries: torch.FloatTensor = None
contrastive_logits: Optional[torch.FloatTensor] = None
prediction_masks: torch.FloatTensor = None
prediction_class: torch.FloatTensor = None
auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None
| class_definition | 33,147 | 34,539 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py | null | 5,104 |
class OneFormerPixelDecoderOutput(ModelOutput):
"""
OneFormer's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns
the mask features and the multiscale features.
Args:
multi_scale_features (`tuple(torch.FloatTensor)`):
Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,
width)` from the Multi-Scale Deformable Attention based Pixel Decoder.
mask_features (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder
Layer.
attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights from pixel decoder. Returned when `output_attentions=True` is passed
or when `config.output_attentions=True`
"""
multi_scale_features: Tuple[torch.FloatTensor] = None
mask_features: torch.FloatTensor = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
| class_definition | 34,666 | 35,857 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py | null | 5,105 |
class OneFormerPixelLevelModuleOutput(ModelOutput):
"""
OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the
`encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale
Deformable Attention based decoder.
Args:
encoder_features (List of `(torch.FloatTensor)`):
List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
decoder_features (List of `(torch.FloatTensor)`):
List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
decoder_last_feature (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1/4 scale features from the last Pixel Decoder Layer.
"""
encoder_features: List[torch.FloatTensor] = None
decoder_features: List[torch.FloatTensor] = None
decoder_last_feature: torch.FloatTensor = None
| class_definition | 35,871 | 37,038 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py | null | 5,106 |
class OneFormerModelOutput(ModelOutput):
"""
Class for outputs of [`OneFormerModel`]. This class returns all the needed hidden states to compute the logits.
Args:
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`)
Output object queries from the last layer in the transformer decoder.
transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`)
Contrastive queries from the transformer decoder.
transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`)
Mask Predictions from the last layer in the transformer decoder.
transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
Class Predictions from the last layer in the transformer decoder.
transformer_decoder_auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*):
Tuple of class and mask predictions from each layer of the transformer decoder.
text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`)
Text queries derived from the input text list used for calculating contrastive loss during training.
task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`)
1D task token to condition the queries.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
"""
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None
transformer_decoder_object_queries: torch.FloatTensor = None
transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None
transformer_decoder_mask_predictions: torch.FloatTensor = None
transformer_decoder_class_predictions: torch.FloatTensor = None
transformer_decoder_auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None
text_queries: Optional[torch.FloatTensor] = None
task_token: torch.FloatTensor = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
| class_definition | 37,052 | 41,059 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py | null | 5,107 |
class OneFormerForUniversalSegmentationOutput(ModelOutput):
"""
Class for outputs of [`OneFormerForUniversalSegmentation`].
This output can be directly passed to [`~OneFormerImageProcessor.post_process_semantic_segmentation`] or
[`~OneFormerImageProcessor.post_process_instance_segmentation`] or
[`~OneFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please see
[`~OneFormerImageProcessor`] for details regarding usage.
Args:
loss (`torch.Tensor`, *optional*):
The computed loss, returned when labels are present.
class_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
query. Note the `+ 1` is needed because we incorporate the null class.
masks_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
query.
auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*):
List of class and mask predictions from each layer of the transformer decoder.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`)
Output object queries from the last layer in the transformer decoder.
transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`)
Contrastive queries from the transformer decoder.
transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`)
Mask Predictions from the last layer in the transformer decoder.
transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
Class Predictions from the last layer in the transformer decoder.
transformer_decoder_auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*):
List of class and mask predictions from each layer of the transformer decoder.
text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`)
Text queries derived from the input text list used for calculating contrastive loss during training.
task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`)
1D task token to condition the queries.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
"""
loss: Optional[torch.FloatTensor] = None
class_queries_logits: torch.FloatTensor = None
masks_queries_logits: torch.FloatTensor = None
auxiliary_predictions: List[Dict[str, torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[List[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None
transformer_decoder_object_queries: torch.FloatTensor = None
transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None
transformer_decoder_mask_predictions: torch.FloatTensor = None
transformer_decoder_class_predictions: torch.FloatTensor = None
transformer_decoder_auxiliary_predictions: Optional[List[Dict[str, torch.FloatTensor]]] = None
text_queries: Optional[torch.FloatTensor] = None
task_token: torch.FloatTensor = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
| class_definition | 41,073 | 46,339 | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py | null | 5,108 |
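A usage sketch of the post-processing path described in the docstring above, following the standard `transformers` API; the checkpoint name and image URL are only examples, any released OneFormer checkpoint should behave the same way:

```python
import torch
import requests
from PIL import Image
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation

processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, task_inputs=["panoptic"], return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# class_queries_logits: (batch_size, num_queries, num_labels + 1)
# masks_queries_logits: (batch_size, num_queries, height, width)
result = processor.image_processor.post_process_panoptic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
segmentation = result["segmentation"]    # (height, width) map of segment ids
segments_info = result["segments_info"]  # per-segment label ids and scores
```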
class OneFormerPixelDecoderFrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which any models other than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
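# A minimal sketch (toy input, default statistics), assuming the frozen batch norm class above is in scope:
# with identical running statistics and affine parameters it matches nn.BatchNorm2d in eval mode.
import torch
from torch import nn
frozen_bn = OneFormerPixelDecoderFrozenBatchNorm2d(3)
reference_bn = nn.BatchNorm2d(3).eval()  # eval mode also uses the (default) running statistics
x = torch.randn(2, 3, 4, 4)
torch.testing.assert_close(frozen_bn(x), reference_bn(x), rtol=1e-4, atol=1e-4)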
|
class_definition
| 46,494 | 47,947 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,109 |
class OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int):
super().__init__()
if embed_dim % num_heads != 0:
raise ValueError(
f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}"
)
dim_per_head = embed_dim // num_heads
# check if dim_per_head is power of 2
if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
warnings.warn(
"Make sure to set embed_dim (d_model) in OneFormerPixelDecoderEncoderMultiscaleDeformableAttention so"
" that the dimension of each attention head is a power of 2, which is more efficient in the authors'"
" CUDA implementation."
)
self.im2col_step = 128
self.d_model = embed_dim
self.n_levels = n_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points)
self.value_proj = nn.Linear(embed_dim, embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states=None,
encoder_attention_mask=None,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
level_start_index=None,
output_attentions: bool = False,
):
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
raise ValueError(
"Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
)
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
# zero out the value vectors at padded positions indicated by the attention mask
value = value.masked_fill(attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
)
attention_weights = self.attention_weights(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
)
attention_weights = nn.functional.softmax(attention_weights, -1).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
)
# batch_size, num_queries, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif reference_points.shape[-1] == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
)
else:
raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
# PyTorch implementation
output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return output, attention_weights
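# A minimal sketch (toy shapes, random tensors) of how 2-d reference points and predicted offsets
# combine into normalized sampling locations, mirroring the `reference_points.shape[-1] == 2` branch above.
import torch
batch, queries, heads, levels, points = 1, 2, 1, 2, 1
spatial_shapes = torch.tensor([[8, 8], [4, 4]])            # (levels, 2) as (height, width)
reference_points = torch.rand(batch, queries, levels, 2)   # normalized (x, y) per query and level
sampling_offsets = torch.randn(batch, queries, heads, levels, points, 2)
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
    reference_points[:, :, None, :, None, :]
    + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
print(sampling_locations.shape)  # torch.Size([1, 2, 1, 2, 1, 2])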
|
class_definition
| 48,110 | 52,486 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,110 |
class OneFormerPixelDecoderEncoderLayer(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.embed_dim = config.conv_dim
self.self_attn = OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
n_levels=3,
n_points=4,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.dropout = config.dropout
self.activation_fn = nn.functional.relu
self.activation_dropout = config.dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_feedforward_dim)
self.fc2 = nn.Linear(config.encoder_feedforward_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.is_training = config.is_training
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: torch.Tensor = None,
reference_points=None,
spatial_shapes=None,
level_start_index=None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes of the backbone feature maps.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Apply Multi-scale Deformable Attention Module on the multi-scale feature maps.
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.is_training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.is_training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
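# A minimal sketch (toy half-precision tensor) of the numerical-stability clamp applied at the end of the
# encoder layer above: overflowed values are clamped to just below the fp16 range limit.
import torch
hidden_states = torch.tensor([1.0, float("inf"), -2.0], dtype=torch.float16)
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
    clamp_value = torch.finfo(hidden_states.dtype).max - 1000
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
print(hidden_states)  # the inf entry becomes a large finite fp16 value, ~64500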
|
class_definition
| 52,489 | 56,395 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,111 |
class OneFormerPixelDecoderEncoderOnly(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
[`OneFormerPixelDecoderEncoderLayer`].
The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers.
Args:
config: OneFormerConfig
"""
def __init__(self, config: OneFormerConfig):
super().__init__()
self.config = config
self.dropout = config.dropout
self.layers = nn.ModuleList([OneFormerPixelDecoderEncoderLayer(config) for _ in range(config.encoder_layers)])
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
"""
Get reference points for each feature map. Used in decoder.
Args:
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Valid ratios of each feature map.
device (`torch.device`):
Device on which to create the tensors.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
"""
reference_points_list = []
for lvl, (height, width) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(
torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device),
torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device),
)
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(
self,
inputs_embeds=None,
attention_mask=None,
position_embeddings=None,
spatial_shapes=None,
level_start_index=None,
valid_ratios=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
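# A minimal sketch (toy spatial shapes, no padding) of the reference-point grid built by
# `get_reference_points` above: one normalized (x, y) pixel center per position and feature level.
import torch
spatial_shapes = torch.tensor([[2, 3], [1, 2]])  # (num_levels, 2) as (height, width)
valid_ratios = torch.ones(1, 2, 2)               # no padding, so every ratio is 1
reference_points_list = []
for lvl, (height, width) in enumerate(spatial_shapes.tolist()):
    ref_y, ref_x = torch.meshgrid(
        torch.linspace(0.5, height - 0.5, height),
        torch.linspace(0.5, width - 0.5, width),
        indexing="ij",
    )
    ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height)
    ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width)
    reference_points_list.append(torch.stack((ref_x, ref_y), -1))
reference_points = torch.cat(reference_points_list, 1)[:, :, None] * valid_ratios[:, None]
print(reference_points.shape)  # torch.Size([1, 8, 2, 2]) -> (batch, sum(H*W), num_levels, 2)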
|
class_definition
| 56,552 | 62,423 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,112 |
class OneFormerPixelDecoder(nn.Module):
def __init__(self, config: OneFormerConfig, feature_channels):
super().__init__()
self.config = config
# positional encoding
self.position_embedding = OneFormerSinePositionEmbedding(num_pos_feats=config.conv_dim // 2, normalize=True)
self.num_feature_levels = 3
transformer_in_channels = feature_channels[-self.num_feature_levels :]
self.transformer_feature_strides = config.strides[-self.num_feature_levels :]
self.feature_channels = feature_channels
self.level_embed = nn.Parameter(torch.Tensor(self.num_feature_levels, config.conv_dim))
# Create input projection layers
if self.num_feature_levels > 1:
input_projections_list = []
for in_channels in transformer_in_channels[::-1]:
input_projections_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.conv_dim, kernel_size=1),
nn.GroupNorm(32, config.conv_dim),
)
)
self.input_projections = nn.ModuleList(input_projections_list)
else:
self.input_projections = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(transformer_in_channels[-1], config.conv_dim, kernel_size=1),
nn.GroupNorm(32, config.conv_dim),
)
]
)
self.encoder = OneFormerPixelDecoderEncoderOnly(config)
self.mask_projection = nn.Conv2d(
config.conv_dim,
config.mask_dim,
kernel_size=1,
stride=1,
padding=0,
)
self.common_stride = config.common_stride
# extra fpn levels
stride = min(self.transformer_feature_strides)
self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
lateral_convs = []
output_convs = []
for idx, in_channels in enumerate(self.feature_channels[: self.num_fpn_levels]):
lateral_conv = nn.Sequential(
nn.Conv2d(
in_channels,
config.conv_dim,
kernel_size=1,
bias=False,
),
nn.GroupNorm(32, config.conv_dim),
)
output_conv = nn.Sequential(
nn.Conv2d(
config.conv_dim,
config.conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=False,
),
nn.GroupNorm(32, config.conv_dim),
nn.ReLU(),
)
self.add_module("adapter_{}".format(idx + 1), lateral_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
def get_valid_ratio(self, mask, dtype=torch.float32):
"""Get the valid ratio of all feature maps."""
_, height, width = mask.shape
valid_height = torch.sum(~mask[:, :, 0], 1)
valid_width = torch.sum(~mask[:, 0, :], 1)
valid_ratio_height = valid_height.to(dtype) / height
valid_ratio_width = valid_width.to(dtype) / width
valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
return valid_ratio
def forward(
self,
features,
encoder_outputs=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# Apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
sources = []
position_embeddings_list = []
for level, source in enumerate(features[::-1][: self.num_feature_levels]):
sources.append(self.input_projections[level](source))
position_embeddings_list.append(self.position_embedding(source))
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in sources]
# Prepare encoder inputs (by flattening)
source_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)):
batch_size, num_channels, height, width = source.shape
spatial_shape = (height, width)
spatial_shapes.append(spatial_shape)
source = source.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
source_flatten.append(source)
mask_flatten.append(mask)
source_flatten = torch.cat(source_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1)
# Send source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + projection layer output) through the encoder
# Also provide spatial_shapes, level_start_index and valid_ratios
if encoder_outputs is None:
encoder_outputs = self.encoder(
inputs_embeds=source_flatten,
attention_mask=mask_flatten,
position_embeddings=lvl_pos_embed_flatten,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
y = encoder_outputs.last_hidden_state
bs = y.shape[0]
split_size_or_sections = [None] * self.num_feature_levels
for i in range(self.num_feature_levels):
if i < self.num_feature_levels - 1:
split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
else:
split_size_or_sections[i] = y.shape[1] - level_start_index[i]
y = torch.split(y, split_size_or_sections, dim=1)
out = []
multi_scale_features = []
num_cur_levels = 0
for i, z in enumerate(y):
out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
# append `out` with extra FPN levels
# Reverse feature maps into top-down order (from low to high resolution)
for idx, feats in enumerate(features[: self.num_fpn_levels][::-1]):
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
cur_fpn = lateral_conv(feats)
# Following the FPN top-down pathway, upsample the coarser map to the lateral feature's size (bilinear here)
y = cur_fpn + nn.functional.interpolate(
out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False
)
y = output_conv(y)
out.append(y)
for o in out:
if num_cur_levels < self.num_feature_levels:
multi_scale_features.append(o)
num_cur_levels += 1
return OneFormerPixelDecoderOutput(
mask_features=self.mask_projection(out[-1]),
multi_scale_features=multi_scale_features,
attentions=encoder_outputs.attentions,
)
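# A minimal sketch (toy per-level shapes) of how `level_start_index` is derived from the
# (height, width) pairs collected in the forward pass above.
import torch
spatial_shapes = torch.tensor([[8, 8], [4, 4], [2, 2]])
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
print(level_start_index)  # tensor([ 0, 64, 80]) -> offset of each level in the flattened sequence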
|
class_definition
| 62,540 | 70,955 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,113 |
class OneFormerPixelLevelModule(nn.Module):
def __init__(self, config: OneFormerConfig):
"""
Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image
Segmentation](https://arxiv.org/abs/2112.01527). It runs the input image through a backbone and a pixel
decoder, generating multi-scale feature maps and pixel embeddings.
Args:
config ([`OneFormerConfig`]):
The configuration used to instantiate this model.
"""
super().__init__()
self.encoder = load_backbone(config)
self.decoder = OneFormerPixelDecoder(config, feature_channels=self.encoder.channels)
def forward(self, pixel_values: Tensor, output_hidden_states: bool = False) -> OneFormerPixelLevelModuleOutput:
features: List[Tensor] = self.encoder(pixel_values).feature_maps
decoder_output: OneFormerPixelDecoderOutput = self.decoder(features, output_hidden_states=output_hidden_states)
return OneFormerPixelLevelModuleOutput(
encoder_features=tuple(features),
decoder_features=decoder_output.multi_scale_features,
decoder_last_feature=decoder_output.mask_features,
)
|
class_definition
| 71,076 | 72,304 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,114 |
class OneFormerAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and
keys (as explained in the DETR paper).
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
key_value_states: Optional[torch.Tensor] = None,
key_value_position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
hidden_states = hidden_states.permute(1, 0, 2) if hidden_states is not None else None
position_embeddings = position_embeddings.permute(1, 0, 2) if position_embeddings is not None else None
key_value_states = key_value_states.permute(1, 0, 2) if key_value_states is not None else None
key_value_position_embeddings = (
key_value_position_embeddings.permute(1, 0, 2) if key_value_position_embeddings is not None else None
)
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
# add key-value position embeddings to the key value states
if key_value_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention mask should be of size {(target_len, batch_size * self.num_heads, source_len)}, but is"
f" {attention_mask.size()}"
)
attn_weights += attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights has to be reshaped
# twice and reused in the computation that follows
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output).permute(1, 0, 2)
return attn_output, attn_weights_reshaped
|
class_definition
| 72,397 | 78,699 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,115 |
class OneFormerTransformerDecoderSelfAttentionLayer(nn.Module):
def __init__(
self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05
):
super().__init__()
self.self_attn = OneFormerAttention(embed_dim=embed_dim, num_heads=num_heads, dropout=dropout, is_decoder=True)
self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.activation = ACT2FN[activation]
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
output,
output_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2, attention_weights = self.self_attn(
hidden_states=output, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True
)
output = output + self.dropout(output2)
output = self.norm(output)
return output, attention_weights
def forward_pre(
self,
output,
output_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2 = self.norm(output)
output2, attention_weights = self.self_attn(
hidden_states=output2, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True
)
output = output + self.dropout(output2)
return output, attention_weights
def forward(
self,
output,
output_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(output, output_mask, output_key_padding_mask, query_pos)
return self.forward_post(output, output_mask, output_key_padding_mask, query_pos)
|
class_definition
| 78,702 | 80,815 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,116 |
class OneFormerTransformerDecoderCrossAttentionLayer(nn.Module):
def __init__(
self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05
):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.activation = ACT2FN[activation]
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
output,
memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2, attention_weights = self.multihead_attn(
query=self.with_pos_embed(output, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)
output = output + self.dropout(output2)
output = self.norm(output)
return output, attention_weights
def forward_pre(
self,
output,
memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2 = self.norm(output)
output2, attention_weights = self.multihead_attn(
query=self.with_pos_embed(output2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)
output = output + self.dropout(output2)
return output, attention_weights
def forward(
self,
output,
memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos)
|
class_definition
| 80,818 | 83,315 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,117 |
class OneFormerTransformerDecoderFFNLayer(nn.Module):
def __init__(
self,
d_model,
dim_feedforward=2048,
dropout=0.0,
activation="relu",
normalize_before=False,
layer_norm_eps=1e-05,
):
super().__init__()
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.activation = ACT2FN[activation]
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, output):
output2 = self.linear2(self.dropout(self.activation(self.linear1(output))))
output = output + self.dropout(output2)
output = self.norm(output)
return output
def forward_pre(self, output):
output2 = self.norm(output)
output2 = self.linear2(self.dropout(self.activation(self.linear1(output2))))
output = output + self.dropout(output2)
return output
def forward(self, output):
if self.normalize_before:
return self.forward_pre(output)
return self.forward_post(output)
|
class_definition
| 83,318 | 84,680 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,118 |
class OneFormerMLPPredictionHead(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int = 3):
"""
A classic Multi Layer Perceptron (MLP).
Args:
input_dim (`int`):
The input dimensions.
hidden_dim (`int`):
The hidden dimensions.
output_dim (`int`):
The output dimensions.
num_layers (`int`, *optional*, defaults to 3):
The number of layers.
"""
super().__init__()
in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]
layers = []
for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
layers.append(
PredictionBlock(in_dim, out_dim, activation=nn.ReLU() if i < num_layers - 1 else nn.Identity())
)
self.layers = nn.Sequential(*layers)
def forward(self, input: Tensor) -> Tensor:
return self.layers(input)
|
class_definition
| 84,683 | 85,746 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,119 |
class OneFormerTransformerDecoderLayer(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.embed_dim = config.hidden_dim
self.num_feature_levels = 3
self.cross_attn = OneFormerTransformerDecoderCrossAttentionLayer(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
dropout=0.0,
normalize_before=config.pre_norm,
layer_norm_eps=config.layer_norm_eps,
)
self.self_attn = OneFormerTransformerDecoderSelfAttentionLayer(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
dropout=0.0,
normalize_before=config.pre_norm,
layer_norm_eps=config.layer_norm_eps,
)
self.ffn = OneFormerTransformerDecoderFFNLayer(
d_model=self.embed_dim,
dim_feedforward=config.dim_feedforward,
dropout=0.0,
normalize_before=config.pre_norm,
layer_norm_eps=config.layer_norm_eps,
)
def forward(
self,
index: int,
output: torch.Tensor,
multi_stage_features: List[torch.Tensor],
multi_stage_positional_embeddings: List[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
query_embeddings: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
):
"""
Args:
index (`int`): index of the layer in the Transformer decoder.
output (`torch.FloatTensor`): the object queries of shape `(N, batch, hidden_dim)`
multi_stage_features (`List[torch.Tensor]`): the multi-scale features from the pixel decoder.
multi_stage_positional_embeddings (`List[torch.Tensor]`):
positional embeddings for the multi_stage_features
attention_mask (`torch.FloatTensor`): attention mask for the masked cross attention layer
query_embeddings (`torch.FloatTensor`, *optional*):
position embeddings that are added to the queries and keys in the self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
level_index = index % self.num_feature_levels
# un-mask queries whose mask covers every position, otherwise the softmax over an all-masked row produces NaNs
attention_mask[torch.where(attention_mask.sum(-1) == attention_mask.shape[-1])] = False
# Masked Cross Attention
output, cross_attn_weights = self.cross_attn(
output,
multi_stage_features[level_index],
memory_mask=attention_mask,
memory_key_padding_mask=None, # here we do not apply masking on padded region
pos=multi_stage_positional_embeddings[level_index],
query_pos=query_embeddings,
)
# Self Attention
output, self_attn_weights = self.self_attn(
output,
output_mask=None,
output_key_padding_mask=None,
query_pos=query_embeddings,
)
# Fully Connected
output = self.ffn(output)
outputs = (output,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
|
class_definition
| 85,791 | 89,136 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,120 |
class OneFormerTransformerDecoderQueryTransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(
self,
output,
memory,
output_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
intermediate = []
for layer in self.layers:
output = layer(
output,
memory,
output_mask=output_mask,
memory_mask=memory_mask,
output_key_padding_mask=output_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
|
class_definition
| 89,139 | 90,696 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,121 |
class OneFormerTransformerDecoderQueryTransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
layer_norm_eps=1e-05,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = ACT2FN[activation]
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
output,
memory,
output_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(output, query_pos)
output2 = self.self_attn(q, k, value=output, attn_mask=output_mask, key_padding_mask=output_key_padding_mask)
output2 = output2[0]
output = output + self.dropout1(output2)
output = self.norm1(output)
output2 = self.multihead_attn(
query=self.with_pos_embed(output, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)
output2 = output2[0]
output = output + self.dropout2(output2)
output = self.norm2(output)
output2 = self.linear2(self.dropout(self.activation(self.linear1(output))))
output = output + self.dropout3(output2)
output = self.norm3(output)
return output
def forward_pre(
self,
output,
memory,
output_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2 = self.norm1(output)
q = k = self.with_pos_embed(output2, query_pos)
output2 = self.self_attn(q, k, value=output2, attn_mask=output_mask, key_padding_mask=output_key_padding_mask)
output2 = output2[0]
output = output + self.dropout1(output2)
output2 = self.norm2(output)
output2 = self.multihead_attn(
query=self.with_pos_embed(output2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)
output2 = output2[0]
output = output + self.dropout2(output2)
output2 = self.norm3(output)
output2 = self.linear2(self.dropout(self.activation(self.linear1(output2))))
output = output + self.dropout3(output2)
return output
def forward(
self,
output,
memory,
output_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
output_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(
output,
memory,
output_mask,
memory_mask,
output_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
return self.forward_post(
output,
memory,
output_mask,
memory_mask,
output_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
|
class_definition
| 90,699 | 95,312 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,122 |
class OneFormerTransformerDecoderQueryTransformer(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
layer_norm_eps=1e-05,
):
super().__init__()
decoder_layer = OneFormerTransformerDecoderQueryTransformerDecoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before, layer_norm_eps
)
decoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.decoder = OneFormerTransformerDecoderQueryTransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
)
self.d_model = d_model
self.nhead = nhead
def forward(self, src, mask, query_embed, pos_embed, task_token=None):
batch_size = src.shape[0]
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1)
if mask is not None:
mask = mask.flatten(1)
if task_token is None:
queries = torch.zeros_like(query_embed)
else:
queries = task_token.repeat(query_embed.shape[0], 1, 1)
queries = self.decoder(queries, src, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed)
return queries.transpose(1, 2)
|
class_definition
| 95,315 | 96,900 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,123 |
class OneFormerTransformerDecoder(nn.Module):
"""
Transformer decoder
"""
def __init__(self, in_channels: int, config: OneFormerConfig):
super().__init__()
self.config = config
self.dropout = config.dropout
self.num_heads = config.num_attention_heads
self.is_training = config.is_training
self.use_task_norm = config.use_task_norm
self.use_auxiliary_loss = config.use_auxiliary_loss
self.query_transformer = OneFormerTransformerDecoderQueryTransformer(
d_model=config.hidden_dim,
dropout=config.dropout,
nhead=config.num_attention_heads,
dim_feedforward=config.dim_feedforward,
num_decoder_layers=config.query_dec_layers,
normalize_before=config.pre_norm,
return_intermediate_dec=False,
layer_norm_eps=config.layer_norm_eps,
)
self.decoder_norm = nn.LayerNorm(config.hidden_dim, eps=config.layer_norm_eps)
self.num_feature_levels = 3
self.layers = nn.ModuleList(
[OneFormerTransformerDecoderLayer(config) for _ in range(config.decoder_layers - 1)]
)
self.query_input_projection = nn.Conv2d(in_channels, config.hidden_dim, kernel_size=1)
self.class_embed = nn.Linear(config.hidden_dim, config.num_labels + 1)
self.mask_embed = OneFormerMLPPredictionHead(
config.hidden_dim,
config.hidden_dim,
config.mask_dim,
3,
)
def forward(
self,
task_token=None,
multi_stage_features=None,
multi_stage_positional_embeddings=None,
mask_features=None,
query_features=None,
query_embeddings=None,
query_embedder=None,
size_list=None,
output_attentions=None,
):
if self.use_task_norm:
task_token = self.decoder_norm(task_token)
object_queries = self.query_transformer(
query_features,
None,
query_embedder.weight[:-1],
self.query_input_projection(mask_features),
task_token if self.use_task_norm else None,
)
object_queries = object_queries[0].permute(1, 0, 2)
queries = torch.cat([object_queries, task_token], dim=0)
output = queries.clone()
intermediate_class_predictions = []
intermediate_mask_predictions = []
# prediction heads on learnable query features
outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads(
output, mask_features, attention_mask_target_size=size_list[0]
)
intermediate_class_predictions.append(outputs_class)
intermediate_mask_predictions.append(outputs_mask)
attentions = ()
for index, layer in enumerate(self.layers):
layer_outputs = layer(
index=index,
output=output,
multi_stage_features=multi_stage_features,
multi_stage_positional_embeddings=multi_stage_positional_embeddings,
attention_mask=attention_mask,
query_embeddings=query_embeddings,
output_attentions=output_attentions,
)
output = layer_outputs[0]
attentions += (layer_outputs[1:],)
outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads(
output, mask_features, attention_mask_target_size=size_list[(index + 1) % self.num_feature_levels]
)
intermediate_class_predictions.append(outputs_class)
intermediate_mask_predictions.append(outputs_mask)
if len(intermediate_mask_predictions) != len(self.layers) + 1:
raise ValueError(
"Intermediate predictions in the transformer decoder must have one more element than the number of"
" layers (one initial prediction plus one prediction per layer)"
)
object_queries = layer_outputs[0].permute(1, 0, 2)
contrastive_logits = queries.permute(1, 0, 2)
return OneFormerTransformerDecoderOutput(
object_queries=object_queries,
contrastive_logits=contrastive_logits,
prediction_masks=intermediate_mask_predictions[-1],
prediction_class=intermediate_class_predictions[-1],
auxiliary_predictions=self._get_aux_predictions(
intermediate_class_predictions, intermediate_mask_predictions
)
if self.use_auxiliary_loss
else None,
attentions=attentions,
)
def forward_prediction_heads(self, output, mask_features, attention_mask_target_size):
decoder_output = self.decoder_norm(output)
decoder_output = decoder_output.transpose(0, 1)
outputs_class = self.class_embed(decoder_output)
mask_embed = self.mask_embed(decoder_output)
outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
attention_mask = nn.functional.interpolate(
outputs_mask, size=attention_mask_target_size, mode="bilinear", align_corners=False
)
# must use bool type
# If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
attention_mask = (
attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5
).bool()
attention_mask = attention_mask.detach()
return outputs_class, outputs_mask, attention_mask
@torch.jit.unused
def _get_aux_predictions(self, outputs_class, outputs_seg_masks):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
aux_list = [
{"class_queries_logits": a, "masks_queries_logits": b}
for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
]
return tuple(aux_list)
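# A minimal sketch (toy shapes) of how the predicted masks become the boolean cross-attention mask in
# `forward_prediction_heads` above: positions whose sigmoid probability falls below 0.5 are masked out.
import torch
num_heads, num_queries, height, width = 2, 3, 4, 4
outputs_mask = torch.randn(1, num_queries, height, width)
attention_mask = (
    outputs_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, num_heads, 1, 1).flatten(0, 1) < 0.5
).bool()
print(attention_mask.shape)  # torch.Size([2, 3, 16]) -> (batch_size * num_heads, num_queries, height * width)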
|
class_definition
| 96,903 | 102,989 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,124 |
class OneFormerTransformerModule(nn.Module):
"""
The OneFormer's transformer module.
"""
def __init__(self, in_features: int, config: OneFormerConfig):
super().__init__()
hidden_dim = config.hidden_dim
self.num_feature_levels = 3
self.position_embedder = OneFormerSinePositionEmbedding(num_pos_feats=hidden_dim // 2, normalize=True)
self.queries_embedder = nn.Embedding(config.num_queries, hidden_dim)
self.input_projections = []
for _ in range(self.num_feature_levels):
if in_features != hidden_dim or config.enforce_input_proj:
self.input_projections.append(nn.Conv2d(in_features, hidden_dim, kernel_size=1))
else:
self.input_projections.append(nn.Sequential())
self.decoder = OneFormerTransformerDecoder(in_channels=in_features, config=config)
self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
def forward(
self,
multi_scale_features: List[Tensor],
mask_features: Tensor,
task_token: Tensor,
output_attentions: bool = False,
) -> OneFormerTransformerDecoderOutput:
if not len(multi_scale_features) == self.num_feature_levels:
raise ValueError(
f"Number of elements in multi_scale_features ({len(multi_scale_features)}) and num_feature_levels"
f" ({self.num_feature_levels}) do not match!"
)
multi_stage_features = []
multi_stage_positional_embeddings = []
size_list = []
for i in range(self.num_feature_levels):
size_list.append(multi_scale_features[i].shape[-2:])
multi_stage_positional_embeddings.append(self.position_embedder(multi_scale_features[i], None).flatten(2))
multi_stage_features.append(
self.input_projections[i](multi_scale_features[i]).flatten(2)
+ self.level_embed.weight[i][None, :, None]
)
# flatten NxCxHxW to HWxNxC
multi_stage_positional_embeddings[-1] = multi_stage_positional_embeddings[-1].permute(2, 0, 1)
multi_stage_features[-1] = multi_stage_features[-1].permute(2, 0, 1)
_, batch_size, _ = multi_stage_features[0].shape
# QxNxC
query_embeddings = self.queries_embedder.weight.unsqueeze(1).repeat(1, batch_size, 1)
task_token = task_token.unsqueeze(0)
query_features = self.position_embedder(mask_features, None)
return self.decoder(
task_token=task_token,
multi_stage_features=multi_stage_features,
multi_stage_positional_embeddings=multi_stage_positional_embeddings,
mask_features=mask_features,
query_features=query_features,
query_embeddings=query_embeddings,
query_embedder=self.queries_embedder,
size_list=size_list,
output_attentions=output_attentions,
)
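# A minimal sketch (toy shapes) of the "flatten NxCxHxW to HWxNxC" step used above for the
# multi-scale features fed to the transformer decoder.
import torch
feature = torch.randn(2, 256, 8, 8)  # (batch, channels, height, width)
sequence_first = feature.flatten(2).permute(2, 0, 1)
print(sequence_first.shape)  # torch.Size([64, 2, 256]) -> (height * width, batch, channels)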
|
class_definition
| 102,992 | 105,982 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,125 |
class OneFormerSinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention Is
All You Need paper, generalized to work on images.
"""
def __init__(
self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None
):
super().__init__()
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
self.scale = 2 * math.pi if scale is None else scale
def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor:
if mask is None:
mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
not_mask = (~mask).to(x.dtype)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=x.device).type_as(x)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
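# A minimal sketch (toy 2x3 feature map) of the normalized cumulative-sum coordinates that the sine
# position embedding above turns into sine/cosine features.
import math
import torch
x = torch.zeros(1, 4, 2, 3)  # (batch, channels, height, width)
mask = torch.zeros((x.size(0), x.size(2), x.size(3)), dtype=torch.bool)
not_mask = (~mask).to(x.dtype)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
eps, scale = 1e-6, 2 * math.pi
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * scale
print(x_embed[0])  # each row runs from ~2*pi/3 to ~2*pi across the width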
|
class_definition
| 106,097 | 107,909 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,126 |
class PredictionBlock(nn.Module):
def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None:
super().__init__()
self.layers = [nn.Linear(in_dim, out_dim), activation]
# Maintain submodule indexing as if part of a Sequential block
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
|
class_definition
| 107,993 | 108,547 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,127 |
class OneFormerTextMapperAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE: qk_scale can be set manually to stay compatible with weights trained with a different scale factor
self.scale = qk_scale or head_dim**-0.5
self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.k_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.v_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, q, k, v):
batch_size, q_sequence_length, num_channels = q.shape
if not k.shape == v.shape:
raise ValueError(f"keys ({list(k.shape)}) and values ({list(v.shape)}) have different shapes!")
batch_size, k_sequence_length, num_channels = k.shape
q = self.q_proj(q).reshape(batch_size, q_sequence_length, self.num_heads, num_channels // self.num_heads)
k = self.k_proj(k).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads)
v = self.v_proj(v).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads)
attn = torch.einsum("bnkc,bmkc->bknm", q, k) * self.scale
attn = attn.softmax(dim=-1)
output = torch.einsum("bknm,bmkc->bnkc", attn, v).reshape(batch_size, q_sequence_length, num_channels)
output = self.proj(output)
output = self.proj_drop(output)
return output
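# A minimal sketch (toy shapes) of the per-head einsum attention pattern used above: inputs are reshaped to
# (batch, seq, heads, head_dim) and scores are formed with "bnkc,bmkc->bknm".
import torch
batch, q_len, k_len, heads, head_dim = 1, 2, 3, 4, 8
q = torch.randn(batch, q_len, heads, head_dim)
k = torch.randn(batch, k_len, heads, head_dim)
v = torch.randn(batch, k_len, heads, head_dim)
attn = (torch.einsum("bnkc,bmkc->bknm", q, k) * head_dim**-0.5).softmax(dim=-1)
output = torch.einsum("bknm,bmkc->bnkc", attn, v).reshape(batch, q_len, heads * head_dim)
print(attn.shape, output.shape)  # torch.Size([1, 4, 2, 3]) torch.Size([1, 2, 32])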
|
class_definition
| 108,550 | 110,218 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,128 |
class OneFormerTextTransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dropout=0.1,
layer_norm_eps=1e-05,
):
super().__init__()
self.self_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout)
self.cross_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.mlp = nn.Sequential(
nn.Linear(d_model, d_model * 4), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_model * 4, d_model)
)
def forward(self, hidden_state, mem):
q = k = v = self.norm1(hidden_state)
hidden_state = hidden_state + self.self_attn(q, k, v)
q = self.norm2(hidden_state)
hidden_state = hidden_state + self.cross_attn(q, mem, mem)
hidden_state = hidden_state + self.dropout(self.mlp(self.norm3(hidden_state)))
return hidden_state
|
class_definition
| 110,221 | 111,359 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,129 |
class OneFormerTextContextDecoder(nn.Module):
def __init__(
self,
transformer_width=256,
transformer_heads=4,
transformer_layers=6,
visual_dim=1024,
dropout=0.1,
layer_norm_eps=1e-05,
**kwargs,
):
super().__init__()
self.memory_proj = nn.Sequential(
nn.LayerNorm(visual_dim, eps=layer_norm_eps),
nn.Linear(visual_dim, transformer_width),
nn.LayerNorm(transformer_width, eps=layer_norm_eps),
)
self.text_proj = nn.Sequential(
nn.LayerNorm(visual_dim, eps=layer_norm_eps),
nn.Linear(visual_dim, transformer_width),
)
self.decoder = nn.ModuleList(
[
OneFormerTextTransformerDecoderLayer(transformer_width, transformer_heads, dropout, layer_norm_eps)
for _ in range(transformer_layers)
]
)
self.out_proj = nn.Sequential(
nn.LayerNorm(transformer_width, eps=layer_norm_eps), nn.Linear(transformer_width, visual_dim)
)
def forward(self, text, visual):
visual = self.memory_proj(visual)
hidden_state = self.text_proj(text)
for layer in self.decoder:
hidden_state = layer(hidden_state, visual)
return self.out_proj(hidden_state)
|
class_definition
| 111,362 | 112,709 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,130 |
class OneFormerTextMLP(nn.Module):
def __init__(
self,
hidden_size: Optional[int] = None,
intermediate_size: Optional[int] = None,
output_size: Optional[int] = None,
):
super().__init__()
self.activation_fn = ACT2FN["quick_gelu"]
self.fc1 = nn.Linear(hidden_size, intermediate_size)
self.fc2 = nn.Linear(intermediate_size, output_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class_definition
| 112,712 | 113,485 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,131 |
class OneFormerTextTransformerLayer(nn.Module):
def __init__(self, width: int, heads: int, attn_mask: torch.Tensor, layer_norm_eps=1e-05):
super().__init__()
self.self_attn = nn.MultiheadAttention(width, heads)
self.layer_norm1 = nn.LayerNorm(width, eps=layer_norm_eps)
self.mlp = OneFormerTextMLP(width, width * 4, width)
self.layer_norm2 = nn.LayerNorm(width, eps=layer_norm_eps)
self.attn_mask = attn_mask
def forward(
self,
hidden_states: torch.Tensor,
key_padding_mask: Optional[torch.Tensor] = None,
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states = self.self_attn(
hidden_states,
hidden_states,
hidden_states,
need_weights=False,
key_padding_mask=key_padding_mask,
)[0]
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class_definition
| 113,488 | 114,671 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,132 |
class OneFormerTextTransformer(nn.Module):
def __init__(
self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None,
use_checkpoint=False,
layer_norm_eps=1e-05,
):
super().__init__()
self.width = width
self.num_layers = layers
self.layers = nn.Sequential(
*[OneFormerTextTransformerLayer(width, heads, attn_mask, layer_norm_eps) for _ in range(layers)]
)
self.use_checkpoint = use_checkpoint
def forward(self, hidden_states: torch.Tensor):
for layer in self.layers:
if self.use_checkpoint:
hidden_states = self._gradient_checkpointing_func(layer, hidden_states)
else:
hidden_states = layer(hidden_states)
return hidden_states
|
class_definition
| 114,674 | 115,515 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,133 |
class OneFormerTextEncoder(nn.Module):
def __init__(
self,
context_length: int,
width: int,
layers: int,
vocab_size,
use_checkpoint=False,
layer_norm_eps=1e-05,
):
super().__init__()
heads = width // 64
self.context_length = context_length
self.width = width
self.transformer = OneFormerTextTransformer(
width=width,
layers=layers,
heads=heads,
attn_mask=self.build_attention_mask(),
use_checkpoint=use_checkpoint,
layer_norm_eps=layer_norm_eps,
)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))
self.ln_final = nn.LayerNorm(width, eps=layer_norm_eps)
self.token_embedding = nn.Embedding(vocab_size, width)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def forward(self, text):
hidden_state = self.token_embedding(text)
hidden_state = hidden_state + self.positional_embedding
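        # (batch, seq, dim) -> (seq, batch, dim): nn.MultiheadAttention in the transformer layers expects sequence-first inputs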
hidden_state = hidden_state.permute(1, 0, 2)
hidden_state = self.transformer(hidden_state)
hidden_state = hidden_state.permute(1, 0, 2)
hidden_state = self.ln_final(hidden_state)
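        # pool by taking the features at the position of the end-of-text token (the highest token id), as in CLIP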
hidden_state = hidden_state[torch.arange(hidden_state.shape[0]), text.argmax(dim=-1)]
return hidden_state
|
class_definition
| 115,518 | 117,214 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,134 |
class OneFormerTextMapper(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.text_encoder = OneFormerTextEncoder(
context_length=config.text_encoder_context_length,
width=config.text_encoder_width,
layers=config.text_encoder_num_layers,
vocab_size=config.text_encoder_vocab_size,
layer_norm_eps=config.layer_norm_eps,
)
self.text_projector = OneFormerMLPPredictionHead(
config.text_encoder_width,
config.hidden_dim,
config.hidden_dim,
config.text_encoder_proj_layers,
)
if config.text_encoder_n_ctx > 0:
self.prompt_ctx = nn.Embedding(
config.text_encoder_n_ctx,
config.text_encoder_width,
)
else:
self.prompt_ctx = None
def forward(
self,
inputs: Tensor,
) -> Tensor:
text_queries = self.encode_text(inputs)
return text_queries
def encode_text(self, text):
        if text is None:
            raise ValueError("text must not be NoneType")
if text.ndim not in [2, 3]:
raise ValueError("Number of dimensions in text must be 2 or 3")
squeeze_dim = False
num_text = 1
if text.ndim == 3:
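            # flatten (batch_size, num_text, seq_len) into (batch_size * num_text, seq_len) so every text is encoded independently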
num_text = text.shape[1]
batch_size, num_text, hidden_dim = text.shape
text = text.reshape(batch_size * num_text, hidden_dim)
squeeze_dim = True
# [batch_size, num_channels]
encoded_text = self.text_encoder(text)
text_queries = self.text_projector(encoded_text)
if squeeze_dim:
_, hidden_dim = text_queries.shape
text_queries = text_queries.reshape(batch_size, num_text, hidden_dim)
if self.prompt_ctx is not None:
text_queries_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_queries.shape[0], 1, 1)
text_queries = torch.cat([text_queries, text_queries_ctx], dim=1)
return text_queries
|
class_definition
| 117,217 | 119,316 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,135 |
class OneFormerTaskModel(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.task_mlp = OneFormerMLPPredictionHead(
config.task_seq_len,
config.hidden_dim,
config.hidden_dim,
2,
)
def forward(self, inputs: Tensor) -> Tensor:
task_tokens = self.task_mlp(inputs)
return task_tokens
|
class_definition
| 119,319 | 119,724 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,136 |
class OneFormerPreTrainedModel(PreTrainedModel):
config_class = OneFormerConfig
base_model_prefix = "model"
main_input_name = "pixel_values"
def _init_weights(self, module: nn.Module):
xavier_std = self.config.init_xavier_std
std = self.config.init_std
if isinstance(module, OneFormerTransformerModule):
if module.input_projections is not None:
for input_projection in module.input_projections:
if not isinstance(input_projection, nn.Sequential):
nn.init.xavier_uniform_(input_projection.weight, gain=xavier_std)
nn.init.constant_(input_projection.bias, 0)
elif isinstance(module, OneFormerTransformerDecoder):
nn.init.xavier_uniform_(module.query_input_projection.weight, gain=xavier_std)
nn.init.constant_(module.query_input_projection.bias, 0)
module.query_input_projection._is_hf_initialized = True
elif isinstance(module, OneFormerPixelDecoderEncoderMultiscaleDeformableAttention):
nn.init.constant_(module.sampling_offsets.weight.data, 0.0)
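            # bias the sampling offsets towards a regular spread of directions around each reference point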
thetas = torch.arange(module.n_heads, dtype=torch.int64).float() * (2.0 * math.pi / module.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(module.n_heads, 1, 1, 2)
.repeat(1, module.n_levels, module.n_points, 1)
)
for i in range(module.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
module.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
nn.init.constant_(module.attention_weights.weight.data, 0.0)
nn.init.constant_(module.attention_weights.bias.data, 0.0)
nn.init.xavier_uniform_(module.value_proj.weight.data)
nn.init.constant_(module.value_proj.bias.data, 0.0)
nn.init.xavier_uniform_(module.output_proj.weight.data)
nn.init.constant_(module.output_proj.bias.data, 0.0)
elif isinstance(module, OneFormerPixelDecoderEncoderOnly):
for p in module.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
elif isinstance(module, OneFormerPixelDecoder):
for p in module.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
nn.init.normal_(module.level_embed, std=0)
elif isinstance(module, OneFormerTransformerDecoderSelfAttentionLayer):
for p in module.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain=xavier_std)
elif isinstance(module, OneFormerTransformerDecoderCrossAttentionLayer):
for p in module.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain=xavier_std)
elif isinstance(module, OneFormerTransformerDecoderFFNLayer):
for p in module.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain=xavier_std)
elif isinstance(module, OneFormerTransformerDecoderQueryTransformer):
for p in module.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain=xavier_std)
elif isinstance(module, OneFormerPixelLevelModule):
for submodule in module.modules():
if isinstance(submodule, (nn.Conv2d, nn.Linear)):
submodule.weight.data.normal_(mean=0.0, std=std)
if submodule.bias is not None:
submodule.bias.data.zero_()
elif isinstance(module, OneFormerTextContextDecoder):
for submodule in module.modules():
if isinstance(submodule, nn.Linear):
nn.init.trunc_normal_(submodule.weight, std=0.02)
if isinstance(submodule, nn.Linear) and submodule.bias is not None:
nn.init.constant_(submodule.bias, 0)
elif isinstance(submodule, nn.LayerNorm):
nn.init.constant_(submodule.bias, 0)
nn.init.constant_(submodule.weight, 1.0)
elif isinstance(module, OneFormerTextTransformer):
proj_std = (module.width**-0.5) * ((2 * module.num_layers) ** -0.5)
attn_std = module.width**-0.5
fc_std = (2 * module.width) ** -0.5
for layer in module.layers:
nn.init.normal_(layer.self_attn.in_proj_weight, std=attn_std)
nn.init.normal_(layer.self_attn.out_proj.weight, std=proj_std)
nn.init.normal_(layer.mlp.fc1.weight, std=fc_std)
nn.init.normal_(layer.mlp.fc2.weight, std=proj_std)
elif isinstance(module, OneFormerTextEncoder):
nn.init.normal_(module.token_embedding.weight, std=0.02)
nn.init.normal_(module.positional_embedding, std=0.01)
if hasattr(module, "reference_points"):
nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
nn.init.constant_(module.reference_points.bias.data, 0.0)
elif isinstance(module, OneFormerTaskModel):
for submodule in module.modules():
if isinstance(module, OneFormerMLPPredictionHead):
for submodule in module.modules():
if isinstance(submodule, nn.Linear):
nn.init.xavier_uniform_(submodule.weight, gain=xavier_std)
nn.init.constant_(submodule.bias, 0)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.MultiheadAttention):
module.in_proj_weight.data.normal_(mean=0.0, std=std)
module.in_proj_bias.data.zero_()
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
class_definition
| 121,728 | 128,231 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,137 |
class OneFormerModel(OneFormerPreTrainedModel):
main_input_name = ["pixel_values", "task_inputs"]
def __init__(self, config: OneFormerConfig):
super().__init__(config)
self.pixel_level_module = OneFormerPixelLevelModule(config)
self.transformer_module = OneFormerTransformerModule(in_features=config.conv_dim, config=config)
self.task_encoder = OneFormerTaskModel(config)
self.is_training = config.is_training
if self.is_training:
self.text_mapper = OneFormerTextMapper(config)
else:
self.text_mapper = None
self.post_init()
@add_start_docstrings_to_model_forward(ONEFORMER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=OneFormerModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values: Tensor,
task_inputs: Tensor,
text_inputs: Optional[Tensor] = None,
pixel_mask: Optional[Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> OneFormerModelOutput:
r"""
Returns:
`OneFormerModelOutput`
Example:
```python
>>> import torch
>>> from PIL import Image
>>> import requests
>>> from transformers import OneFormerProcessor, OneFormerModel
        >>> # download a test image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # load processor for preprocessing the inputs
>>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> model = OneFormerModel.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> inputs = processor(image, ["semantic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> mask_predictions = outputs.transformer_decoder_mask_predictions
>>> class_predictions = outputs.transformer_decoder_class_predictions
>>> f"👉 Mask Predictions Shape: {list(mask_predictions.shape)}, Class Predictions Shape: {list(class_predictions.shape)}"
'👉 Mask Predictions Shape: [1, 150, 128, 171], Class Predictions Shape: [1, 150, 151]'
```"""
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, _, height, width = pixel_values.shape
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)
pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states)
multi_scale_features = pixel_level_module_output.decoder_features
mask_features = pixel_level_module_output.decoder_last_feature
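        # encode the tokenized task prompt ("the task is {semantic|instance|panoptic}") into a task token that conditions the decoder queries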
task_token = self.task_encoder(task_inputs.to(self.dtype))
if self.is_training:
text_queries = self.text_mapper(text_inputs)
else:
text_queries = None
transformer_module_output = self.transformer_module(
multi_scale_features=multi_scale_features,
mask_features=mask_features,
task_token=task_token,
output_attentions=output_attentions,
)
queries = transformer_module_output.object_queries
encoder_hidden_states = None
pixel_decoder_hidden_states = None
transformer_decoder_hidden_states = None
if output_hidden_states:
encoder_hidden_states = pixel_level_module_output.encoder_features
pixel_decoder_hidden_states = (pixel_level_module_output.decoder_last_feature,)
for f in pixel_level_module_output.decoder_features:
pixel_decoder_hidden_states += (f,)
transformer_decoder_hidden_states = transformer_module_output.auxiliary_predictions
output = OneFormerModelOutput(
encoder_hidden_states=encoder_hidden_states,
pixel_decoder_hidden_states=pixel_decoder_hidden_states,
transformer_decoder_hidden_states=transformer_decoder_hidden_states,
transformer_decoder_object_queries=queries,
transformer_decoder_contrastive_queries=transformer_module_output.contrastive_logits,
transformer_decoder_mask_predictions=transformer_module_output.prediction_masks,
transformer_decoder_class_predictions=transformer_module_output.prediction_class,
transformer_decoder_auxiliary_predictions=transformer_module_output.auxiliary_predictions,
text_queries=text_queries,
task_token=task_token,
attentions=transformer_module_output.attentions,
)
if not return_dict:
output = tuple(v for v in output.values())
return output
|
class_definition
| 128,385 | 133,662 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,138 |
class OneFormerForUniversalSegmentation(OneFormerPreTrainedModel):
main_input_name = ["pixel_values", "task_inputs"]
def __init__(self, config: OneFormerConfig):
super().__init__(config)
self.model = OneFormerModel(config)
self.matcher = OneFormerHungarianMatcher(
cost_class=config.class_weight,
cost_dice=config.dice_weight,
cost_mask=config.mask_weight,
num_points=config.train_num_points,
)
self.weight_dict: Dict[str, float] = {
"loss_cross_entropy": config.class_weight,
"loss_mask": config.mask_weight,
"loss_dice": config.dice_weight,
"loss_contrastive": config.contrastive_weight,
}
self.criterion = OneFormerLoss(
num_classes=config.num_labels,
matcher=self.matcher,
weight_dict=self.weight_dict,
eos_coef=config.no_object_weight,
num_points=config.train_num_points,
oversample_ratio=config.oversample_ratio,
importance_sample_ratio=config.importance_sample_ratio,
contrastive_temperature=config.contrastive_temperature,
)
self.post_init()
def get_loss_dict(
self,
masks_queries_logits: Tensor,
class_queries_logits: Tensor,
contrastive_queries_logits: Tensor,
mask_labels: Tensor,
class_labels: Tensor,
text_queries: Tensor,
auxiliary_predictions: Dict[str, Tensor],
calculate_contrastive_loss: bool,
) -> Dict[str, Tensor]:
loss_dict: Dict[str, Tensor] = self.criterion(
masks_queries_logits=masks_queries_logits,
class_queries_logits=class_queries_logits,
contrastive_queries_logits=contrastive_queries_logits,
mask_labels=mask_labels,
class_labels=class_labels,
text_queries=text_queries,
auxiliary_predictions=auxiliary_predictions,
calculate_contrastive_loss=calculate_contrastive_loss,
)
# weight each loss by `self.weight_dict[<LOSS_NAME>]` including auxiliary losses
for key, weight in self.weight_dict.items():
for loss_key, loss in loss_dict.items():
if key in loss_key:
loss *= weight
return loss_dict
def get_loss(self, loss_dict: Dict[str, Tensor]) -> Tensor:
return sum(loss_dict.values())
@add_start_docstrings_to_model_forward(ONEFORMER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=OneFormerForUniversalSegmentationOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values: Tensor,
task_inputs: Tensor,
text_inputs: Optional[Tensor] = None,
mask_labels: Optional[List[Tensor]] = None,
class_labels: Optional[List[Tensor]] = None,
pixel_mask: Optional[Tensor] = None,
output_auxiliary_logits: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> OneFormerForUniversalSegmentationOutput:
r"""
text_inputs (`List[torch.Tensor]`, *optional*):
            Tensor of shape `(num_queries, sequence_length)` to be fed to a model
mask_labels (`List[torch.Tensor]`, *optional*):
List of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`List[torch.LongTensor]`, *optional*):
            list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
            labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`.
Returns:
            `OneFormerForUniversalSegmentationOutput`
Example:
Universal segmentation example:
```python
>>> from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # load OneFormer fine-tuned on ADE20k for universal segmentation
>>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> url = (
... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
... )
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # Semantic Segmentation
>>> inputs = processor(image, ["semantic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for semantic postprocessing
>>> predicted_semantic_map = processor.post_process_semantic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> f"👉 Semantic Predictions Shape: {list(predicted_semantic_map.shape)}"
'👉 Semantic Predictions Shape: [512, 683]'
>>> # Instance Segmentation
>>> inputs = processor(image, ["instance"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for instance postprocessing
>>> predicted_instance_map = processor.post_process_instance_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]["segmentation"]
>>> f"👉 Instance Predictions Shape: {list(predicted_instance_map.shape)}"
'👉 Instance Predictions Shape: [512, 683]'
>>> # Panoptic Segmentation
>>> inputs = processor(image, ["panoptic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for panoptic postprocessing
>>> predicted_panoptic_map = processor.post_process_panoptic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]["segmentation"]
>>> f"👉 Panoptic Predictions Shape: {list(predicted_panoptic_map.shape)}"
'👉 Panoptic Predictions Shape: [512, 683]'
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(
pixel_values=pixel_values,
task_inputs=task_inputs,
text_inputs=text_inputs,
pixel_mask=pixel_mask,
output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss,
output_attentions=output_attentions,
return_dict=True,
)
loss, loss_dict, auxiliary_predictions = None, None, None
class_queries_logits = outputs.transformer_decoder_class_predictions
masks_queries_logits = outputs.transformer_decoder_mask_predictions
contrastive_queries_logits = outputs.transformer_decoder_contrastive_queries
auxiliary_predictions = outputs.transformer_decoder_auxiliary_predictions
text_queries = outputs.text_queries
if mask_labels is not None and class_labels is not None:
loss_dict: Dict[str, Tensor] = self.get_loss_dict(
masks_queries_logits=masks_queries_logits,
class_queries_logits=class_queries_logits,
contrastive_queries_logits=contrastive_queries_logits,
mask_labels=mask_labels,
class_labels=class_labels,
text_queries=text_queries,
auxiliary_predictions=auxiliary_predictions,
calculate_contrastive_loss=self.config.contrastive_temperature is not None,
)
loss = self.get_loss(loss_dict)
output_auxiliary_logits = (
self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits
)
if not output_auxiliary_logits:
auxiliary_predictions = None
output = OneFormerForUniversalSegmentationOutput(
class_queries_logits=class_queries_logits,
masks_queries_logits=masks_queries_logits,
auxiliary_predictions=auxiliary_predictions,
loss=loss,
**outputs,
)
if not return_dict:
output = tuple(v for v in output.values())
if loss is not None:
                output = (loss,) + output
return output
|
class_definition
| 133,800 | 143,591 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/modeling_oneformer.py
| null | 5,139 |
class OneFormerProcessor(ProcessorMixin):
r"""
Constructs an OneFormer processor which wraps [`OneFormerImageProcessor`] and
[`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and
tokenizer functionalities.
Args:
image_processor ([`OneFormerImageProcessor`]):
The image processor is a required input.
tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]):
The tokenizer is a required input.
        max_seq_length (`int`, *optional*, defaults to 77):
            Sequence length for input text list.
        task_seq_length (`int`, *optional*, defaults to 77):
            Sequence length for input task token.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "OneFormerImageProcessor"
tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(
self, image_processor=None, tokenizer=None, max_seq_length: int = 77, task_seq_length: int = 77, **kwargs
):
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
self.max_seq_length = max_seq_length
self.task_seq_length = task_seq_length
super().__init__(image_processor, tokenizer)
def _preprocess_text(self, text_list=None, max_length=77):
if text_list is None:
raise ValueError("tokens cannot be None.")
tokens = self.tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True)
attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"]
token_inputs = []
for attn_mask, input_id in zip(attention_masks, input_ids):
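            # multiplying by the attention mask zeroes out the token ids at padded positions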
token = torch.tensor(attn_mask) * torch.tensor(input_id)
token_inputs.append(token.unsqueeze(0))
token_inputs = torch.cat(token_inputs, dim=0)
return token_inputs
def __call__(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs):
"""
Main method to prepare for the model one or several task input(s) and image(s). This method forwards the
`task_inputs` and `kwargs` arguments to CLIPTokenizer's [`~CLIPTokenizer.__call__`] if `task_inputs` is not
`None` to encode. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
OneFormerImageProcessor's [`~OneFormerImageProcessor.__call__`] if `images` is not `None`. Please refer to the
        docstring of the above two methods for more information.
Args:
task_inputs (`str`, `List[str]`):
The sequence or batch of task_inputs sequences to be encoded. Each sequence can be a string or a list
of strings of the template "the task is {task}".
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
`List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
(`bool`, *optional*, defaults to `True`):
Whether or not to pad images up to the largest image in a batch and create a pixel mask.
If left to the default, will return a pixel mask that is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **task_inputs** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if task_inputs is None:
raise ValueError("You have to specify the task_input. Found None.")
elif images is None:
raise ValueError("You have to specify the image. Found None.")
if not all(task in ["semantic", "instance", "panoptic"] for task in task_inputs):
raise ValueError("task_inputs must be semantic, instance, or panoptic.")
encoded_inputs = self.image_processor(images, task_inputs, segmentation_maps, **kwargs)
if isinstance(task_inputs, str):
task_inputs = [task_inputs]
if isinstance(task_inputs, List) and all(isinstance(task_input, str) for task_input in task_inputs):
task_token_inputs = []
for task in task_inputs:
task_input = f"the task is {task}"
task_token_inputs.append(task_input)
encoded_inputs["task_inputs"] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length)
else:
raise TypeError("Task Inputs should be a string or a list of strings.")
if hasattr(encoded_inputs, "text_inputs"):
texts_list = encoded_inputs.text_inputs
text_inputs = []
for texts in texts_list:
text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length)
text_inputs.append(text_input_list.unsqueeze(0))
encoded_inputs["text_inputs"] = torch.cat(text_inputs, dim=0)
return encoded_inputs
def encode_inputs(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs):
"""
This method forwards all its arguments to [`OneFormerImageProcessor.encode_inputs`] and then tokenizes the
task_inputs. Please refer to the docstring of this method for more information.
"""
if task_inputs is None:
raise ValueError("You have to specify the task_input. Found None.")
elif images is None:
raise ValueError("You have to specify the image. Found None.")
if not all(task in ["semantic", "instance", "panoptic"] for task in task_inputs):
raise ValueError("task_inputs must be semantic, instance, or panoptic.")
encoded_inputs = self.image_processor.encode_inputs(images, task_inputs, segmentation_maps, **kwargs)
if isinstance(task_inputs, str):
task_inputs = [task_inputs]
if isinstance(task_inputs, List) and all(isinstance(task_input, str) for task_input in task_inputs):
task_token_inputs = []
for task in task_inputs:
task_input = f"the task is {task}"
task_token_inputs.append(task_input)
encoded_inputs["task_inputs"] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length)
else:
raise TypeError("Task Inputs should be a string or a list of strings.")
if hasattr(encoded_inputs, "text_inputs"):
texts_list = encoded_inputs.text_inputs
text_inputs = []
for texts in texts_list:
text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length)
text_inputs.append(text_input_list.unsqueeze(0))
encoded_inputs["text_inputs"] = torch.cat(text_inputs, dim=0)
return encoded_inputs
def post_process_semantic_segmentation(self, *args, **kwargs):
"""
This method forwards all its arguments to [`OneFormerImageProcessor.post_process_semantic_segmentation`].
Please refer to the docstring of this method for more information.
"""
return self.image_processor.post_process_semantic_segmentation(*args, **kwargs)
def post_process_instance_segmentation(self, *args, **kwargs):
"""
This method forwards all its arguments to [`OneFormerImageProcessor.post_process_instance_segmentation`].
Please refer to the docstring of this method for more information.
"""
return self.image_processor.post_process_instance_segmentation(*args, **kwargs)
def post_process_panoptic_segmentation(self, *args, **kwargs):
"""
This method forwards all its arguments to [`OneFormerImageProcessor.post_process_panoptic_segmentation`].
Please refer to the docstring of this method for more information.
"""
return self.image_processor.post_process_panoptic_segmentation(*args, **kwargs)
|
class_definition
| 826 | 9,376 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/processing_oneformer.py
| null | 5,140 |
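A minimal usage sketch for the processor above, assuming the `shi-labs/oneformer_ade20k_swin_tiny` checkpoint and the COCO test image already referenced in the `OneFormerModel` docstring in this file; it mirrors the documented call pattern rather than prescribing an API.
```python
import requests
from PIL import Image
from transformers import OneFormerProcessor

# the processor bundles an OneFormerImageProcessor and a CLIP tokenizer
processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# task_inputs entries must be "semantic", "instance" or "panoptic";
# each is wrapped as "the task is {task}" and tokenized into `task_inputs` ids
inputs = processor(image, ["semantic"], return_tensors="pt")
print(sorted(inputs.keys()))  # expect at least "pixel_values" and "task_inputs"
```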
class OneFormerConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`OneFormerModel`]. It is used to instantiate a
OneFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the OneFormer
[shi-labs/oneformer_ade20k_swin_tiny](https://huggingface.co/shi-labs/oneformer_ade20k_swin_tiny) architecture
trained on [ADE20k-150](https://huggingface.co/datasets/scene_parse_150).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone_config (`PretrainedConfig`, *optional*, defaults to `SwinConfig`):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
ignore_value (`int`, *optional*, defaults to 255):
Values to be ignored in GT label while calculating loss.
num_queries (`int`, *optional*, defaults to 150):
Number of object queries.
no_object_weight (`float`, *optional*, defaults to 0.1):
Weight for no-object class predictions.
class_weight (`float`, *optional*, defaults to 2.0):
Weight for Classification CE loss.
mask_weight (`float`, *optional*, defaults to 5.0):
Weight for binary CE loss.
dice_weight (`float`, *optional*, defaults to 5.0):
Weight for dice loss.
contrastive_weight (`float`, *optional*, defaults to 0.5):
Weight for contrastive loss.
contrastive_temperature (`float`, *optional*, defaults to 0.07):
Initial value for scaling the contrastive logits.
train_num_points (`int`, *optional*, defaults to 12544):
Number of points to sample while calculating losses on mask predictions.
oversample_ratio (`float`, *optional*, defaults to 3.0):
Ratio to decide how many points to oversample.
importance_sample_ratio (`float`, *optional*, defaults to 0.75):
Ratio of points that are sampled via importance sampling.
init_std (`float`, *optional*, defaults to 0.02):
            Standard deviation for normal initialization.
init_xavier_std (`float`, *optional*, defaults to 1.0):
Standard deviation for xavier uniform initialization.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
Epsilon for layer normalization.
is_training (`bool`, *optional*, defaults to `False`):
Whether to run in training or inference mode.
use_auxiliary_loss (`bool`, *optional*, defaults to `True`):
Whether to calculate loss using intermediate predictions from transformer decoder.
output_auxiliary_logits (`bool`, *optional*, defaults to `True`):
Whether to return intermediate predictions from transformer decoder.
strides (`list`, *optional*, defaults to `[4, 8, 16, 32]`):
List containing the strides for feature maps in the encoder.
task_seq_len (`int`, *optional*, defaults to 77):
Sequence length for tokenizing text list input.
text_encoder_width (`int`, *optional*, defaults to 256):
Hidden size for text encoder.
text_encoder_context_length (`int`, *optional*, defaults to 77):
Input sequence length for text encoder.
text_encoder_num_layers (`int`, *optional*, defaults to 6):
Number of layers for transformer in text encoder.
text_encoder_vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size for tokenizer.
text_encoder_proj_layers (`int`, *optional*, defaults to 2):
            Number of layers in the MLP for projecting text queries.
text_encoder_n_ctx (`int`, *optional*, defaults to 16):
Number of learnable text context queries.
conv_dim (`int`, *optional*, defaults to 256):
Feature map dimension to map outputs from the backbone.
mask_dim (`int`, *optional*, defaults to 256):
Dimension for feature maps in pixel decoder.
hidden_dim (`int`, *optional*, defaults to 256):
Dimension for hidden states in transformer decoder.
encoder_feedforward_dim (`int`, *optional*, defaults to 1024):
Dimension for FFN layer in pixel decoder.
norm (`str`, *optional*, defaults to `"GN"`):
Type of normalization.
encoder_layers (`int`, *optional*, defaults to 6):
Number of layers in pixel decoder.
decoder_layers (`int`, *optional*, defaults to 10):
Number of layers in transformer decoder.
use_task_norm (`bool`, *optional*, defaults to `True`):
Whether to normalize the task token.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads in transformer layers in the pixel and transformer decoders.
dropout (`float`, *optional*, defaults to 0.1):
Dropout probability for pixel and transformer decoders.
dim_feedforward (`int`, *optional*, defaults to 2048):
Dimension for FFN layer in transformer decoder.
pre_norm (`bool`, *optional*, defaults to `False`):
Whether to normalize hidden states before attention layers in transformer decoder.
enforce_input_proj (`bool`, *optional*, defaults to `False`):
Whether to project hidden states in transformer decoder.
query_dec_layers (`int`, *optional*, defaults to 2):
Number of layers in query transformer.
common_stride (`int`, *optional*, defaults to 4):
Common stride used for features in pixel decoder.
Examples:
```python
>>> from transformers import OneFormerConfig, OneFormerModel
>>> # Initializing a OneFormer shi-labs/oneformer_ade20k_swin_tiny configuration
>>> configuration = OneFormerConfig()
>>> # Initializing a model (with random weights) from the shi-labs/oneformer_ade20k_swin_tiny style configuration
>>> model = OneFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "oneformer"
attribute_map = {"hidden_size": "hidden_dim"}
def __init__(
self,
backbone_config: Optional[Dict] = None,
backbone: Optional[str] = None,
use_pretrained_backbone: bool = False,
use_timm_backbone: bool = False,
backbone_kwargs: Optional[Dict] = None,
ignore_value: int = 255,
num_queries: int = 150,
        no_object_weight: float = 0.1,
class_weight: float = 2.0,
mask_weight: float = 5.0,
dice_weight: float = 5.0,
contrastive_weight: float = 0.5,
contrastive_temperature: float = 0.07,
train_num_points: int = 12544,
oversample_ratio: float = 3.0,
importance_sample_ratio: float = 0.75,
init_std: float = 0.02,
init_xavier_std: float = 1.0,
layer_norm_eps: float = 1e-05,
is_training: bool = False,
use_auxiliary_loss: bool = True,
output_auxiliary_logits: bool = True,
strides: Optional[list] = [4, 8, 16, 32],
task_seq_len: int = 77,
text_encoder_width: int = 256,
text_encoder_context_length: int = 77,
text_encoder_num_layers: int = 6,
text_encoder_vocab_size: int = 49408,
text_encoder_proj_layers: int = 2,
text_encoder_n_ctx: int = 16,
conv_dim: int = 256,
mask_dim: int = 256,
hidden_dim: int = 256,
encoder_feedforward_dim: int = 1024,
norm: str = "GN",
encoder_layers: int = 6,
decoder_layers: int = 10,
use_task_norm: bool = True,
num_attention_heads: int = 8,
dropout: float = 0.1,
dim_feedforward: int = 2048,
pre_norm: bool = False,
enforce_input_proj: bool = False,
query_dec_layers: int = 2,
common_stride: int = 4,
**kwargs,
):
if backbone_config is None and backbone is None:
logger.info("`backbone_config` is unset. Initializing the config with the default `Swin` backbone.")
backbone_config = CONFIG_MAPPING["swin"](
image_size=224,
num_channels=3,
patch_size=4,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
drop_path_rate=0.3,
use_absolute_embeddings=False,
out_features=["stage1", "stage2", "stage3", "stage4"],
)
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.ignore_value = ignore_value
self.num_queries = num_queries
self.no_object_weight = no_object_weight
self.class_weight = class_weight
self.mask_weight = mask_weight
self.dice_weight = dice_weight
self.contrastive_weight = contrastive_weight
self.contrastive_temperature = contrastive_temperature
self.train_num_points = train_num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.layer_norm_eps = layer_norm_eps
self.is_training = is_training
self.use_auxiliary_loss = use_auxiliary_loss
self.output_auxiliary_logits = output_auxiliary_logits
self.strides = strides
self.task_seq_len = task_seq_len
self.text_encoder_width = text_encoder_width
self.text_encoder_context_length = text_encoder_context_length
self.text_encoder_num_layers = text_encoder_num_layers
self.text_encoder_vocab_size = text_encoder_vocab_size
self.text_encoder_proj_layers = text_encoder_proj_layers
self.text_encoder_n_ctx = text_encoder_n_ctx
self.conv_dim = conv_dim
self.mask_dim = mask_dim
self.hidden_dim = hidden_dim
self.encoder_feedforward_dim = encoder_feedforward_dim
self.norm = norm
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.use_task_norm = use_task_norm
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.dim_feedforward = dim_feedforward
self.pre_norm = pre_norm
self.enforce_input_proj = enforce_input_proj
self.query_dec_layers = query_dec_layers
self.common_stride = common_stride
self.num_hidden_layers = decoder_layers
super().__init__(**kwargs)
|
class_definition
| 937 | 13,435 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/oneformer/configuration_oneformer.py
| null | 5,141 |
class LlavaNextVideoConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LlavaNextVideoForConditionalGeneration`]. It is used to instantiate an
Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [llava-hf/LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf)
model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
ignore_index (`int`, *optional*, defaults to -100):
The ignore index for the loss function.
image_token_index (`int`, *optional*, defaults to 32001):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`int`, *optional*, defaults to -2):
The index of the layer to select the vision feature.
image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
video_token_index (`int`, *optional*, defaults to 32000):
The video token index to encode the image prompt.
spatial_pool_mode (`str`, *optional*, defaults to `"average"`):
Pooling mode to use for videos. Can be "average", "max" or "conv".
spatial_pool_stride (`int`, *optional*, defaults to 2):
Stride used in the pooling layer for videos.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
video_seq_length (`int`, *optional*, defaults to 288):
Sequence length of one video embedding.
Example:
```python
>>> from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = CLIPVisionConfig()
>>> # Initializing a Llama config
>>> text_config = LlamaConfig()
>>> configuration = LlavaNextVideoConfig(vision_config, text_config)
>>> model = LlavaNextVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llava_next_video"
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
ignore_index=-100,
image_token_index=32001,
projector_hidden_act="gelu",
multimodal_projector_bias=True,
vision_feature_select_strategy="default",
vision_feature_layer=-2,
image_grid_pinpoints=None,
tie_word_embeddings=False,
video_token_index=32000,
spatial_pool_mode="average",
spatial_pool_stride=2,
image_seq_length=576,
video_seq_length=288,
**kwargs,
):
self.video_token_index = video_token_index
self.spatial_pool_mode = spatial_pool_mode
self.spatial_pool_stride = spatial_pool_stride
self.image_seq_length = image_seq_length
self.video_seq_length = video_seq_length
self.ignore_index = ignore_index
self.image_token_index = image_token_index
self.projector_hidden_act = projector_hidden_act
self.multimodal_projector_bias = multimodal_projector_bias
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
image_grid_pinpoints = (
image_grid_pinpoints
if image_grid_pinpoints is not None
else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
)
self.image_grid_pinpoints = image_grid_pinpoints
if isinstance(vision_config, dict):
vision_config["model_type"] = (
vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
)
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["clip_vision_model"](
intermediate_size=4096,
hidden_size=1024,
patch_size=14,
image_size=336,
num_hidden_layers=24,
num_attention_heads=16,
vocab_size=32000,
projection_dim=768,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["llama"]()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class_definition
| 1,571 | 8,209 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/configuration_llava_next_video.py
| null | 5,142 |
class LlavaNextVideoConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LlavaNextVideoForConditionalGeneration`]. It is used to instantiate an
Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [llava-hf/LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf)
model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
ignore_index (`int`, *optional*, defaults to -100):
The ignore index for the loss function.
image_token_index (`int`, *optional*, defaults to 32001):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`int`, *optional*, defaults to -2):
The index of the layer to select the vision feature.
image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
video_token_index (`int`, *optional*, defaults to 32000):
The video token index to encode the image prompt.
spatial_pool_mode (`str`, *optional*, defaults to `"average"`):
Pooling mode to use for videos. Can be "average", "max" or "conv".
spatial_pool_stride (`int`, *optional*, defaults to 2):
Stride used in the pooling layer for videos.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
video_seq_length (`int`, *optional*, defaults to 288):
Sequence length of one video embedding.
Example:
```python
>>> from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = CLIPVisionConfig()
>>> # Initializing a Llama config
>>> text_config = LlamaConfig()
>>> configuration = LlavaNextVideoConfig(vision_config, text_config)
>>> model = LlavaNextVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llava_next_video"
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
ignore_index=-100,
image_token_index=32001,
projector_hidden_act="gelu",
multimodal_projector_bias=True,
vision_feature_select_strategy="default",
vision_feature_layer=-2,
image_grid_pinpoints=None,
tie_word_embeddings=False,
video_token_index=32000,
spatial_pool_mode="average",
spatial_pool_stride=2,
image_seq_length=576,
video_seq_length=288,
**kwargs,
):
self.video_token_index = video_token_index
self.spatial_pool_mode = spatial_pool_mode
self.spatial_pool_stride = spatial_pool_stride
self.image_seq_length = image_seq_length
self.video_seq_length = video_seq_length
self.ignore_index = ignore_index
self.image_token_index = image_token_index
self.projector_hidden_act = projector_hidden_act
self.multimodal_projector_bias = multimodal_projector_bias
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
image_grid_pinpoints = (
image_grid_pinpoints
if image_grid_pinpoints is not None
else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
)
self.image_grid_pinpoints = image_grid_pinpoints
if isinstance(vision_config, dict):
vision_config["model_type"] = (
vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
)
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["clip_vision_model"](
intermediate_size=4096,
hidden_size=1024,
patch_size=14,
image_size=336,
num_hidden_layers=24,
num_attention_heads=16,
vocab_size=32000,
projection_dim=768,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["llama"]()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class_definition
| 1,166 | 7,804 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modular_llava_next_video.py
| null | 5,143 |
class LlavaNextVideoCausalLMOutputWithPast(LlavaNextCausalLMOutputWithPast):
"""
video_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
video_hidden_states: Optional[torch.FloatTensor] = None
|
class_definition
| 7,818 | 8,256 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modular_llava_next_video.py
| null | 5,144 |
class LlavaNextVideoPooler(nn.Module):
def __init__(self, config):
super().__init__()
mode = config.spatial_pool_mode
stride = config.spatial_pool_stride
out_channels = getattr(config, "spatial_pool_out_channels", config.vision_config.hidden_size)
self.image_size = (config.vision_config.image_size // config.vision_config.patch_size) ** 2
if mode == "average":
self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride)
elif mode == "max":
self.pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
elif mode == "conv":
self.pool = nn.Conv2d(
in_channels=config.vision_config.hidden_size,
out_channels=out_channels,
kernel_size=stride,
stride=stride,
)
else:
raise ValueError(f"Unknown pooling mode: {mode}. Has to be one of [`average`, `max`, `conv`]")
def forward(self, image_features):
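        # recover the square patch grid from the flattened token sequence, pool it spatially, then flatten back to a sequence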
ori_width = int(math.sqrt(image_features.shape[1] * self.image_size // self.image_size))
ori_height = int(ori_width * self.image_size // self.image_size)
batch_size, _, dim = image_features.shape
image_features_spatial = image_features.view(batch_size, ori_height, ori_height, dim).permute(0, 3, 1, 2)
image_features_spatial_pool = self.pool(image_features_spatial)
return image_features_spatial_pool.flatten(2).transpose(1, 2).contiguous()
|
class_definition
| 8,259 | 9,748 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modular_llava_next_video.py
| null | 5,145 |
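A self-contained sanity check for the pooler above; the `SimpleNamespace` config is a stand-in for illustration only, with values taken from the CLIP ViT-L/14-336 defaults and `spatial_pool_stride=2` shown elsewhere in this file, and it assumes the `LlavaNextVideoPooler` class defined above is in scope.
```python
from types import SimpleNamespace

import torch

# minimal config stub exposing only the attributes the pooler reads (hypothetical, for illustration)
vision_config = SimpleNamespace(hidden_size=1024, image_size=336, patch_size=14)
config = SimpleNamespace(spatial_pool_mode="average", spatial_pool_stride=2, vision_config=vision_config)

pooler = LlavaNextVideoPooler(config)

# one frame of patch features: (336 / 14) ** 2 = 576 tokens of width 1024
frame_features = torch.randn(1, 576, 1024)
pooled = pooler(frame_features)
print(pooled.shape)  # torch.Size([1, 144, 1024]): the 24x24 grid is pooled to 12x12
```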
class LlavaNextVideoPreTrainedModel(LlavaNextPreTrainedModel):
pass
|
class_definition
| 9,751 | 9,822 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modular_llava_next_video.py
| null | 5,146 |
class LlavaNextVideoForConditionalGeneration(LlavaNextForConditionalGeneration):
def __init__(self, config: LlavaNextVideoConfig, **super_kwargs):
super().__init__(config, **super_kwargs)
self.vision_resampler = LlavaNextVideoPooler(config)
self.post_init()
def get_image_features(
self,
pixel_values: torch.FloatTensor,
image_sizes: torch.Tensor,
vision_feature_layer: int,
vision_feature_select_strategy: str,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, channels, height, width)`):
The tensors corresponding to the input images.
            image_sizes (`torch.Tensor` of shape `(num_images, 2)`):
                Actual image size of each image (H, W).
vision_feature_layer (`int`):
The index of the layer to select the vision feature.
vision_feature_select_strategy (`str`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
            image_features (List[`torch.Tensor`]): List of image feature tensors, each containing the visual features of all
                patches, with shape `(num_patches, image_length, embed_dim)`.
"""
# ! infer image_num_patches from image_sizes
image_num_patches = [
image_size_to_num_patches(
image_size=imsize,
grid_pinpoints=self.config.image_grid_pinpoints,
patch_size=self.config.vision_config.image_size,
)
for imsize in image_sizes
]
if pixel_values.dim() == 5:
# stacked if input is (batch_size, num_patches, num_channels, height, width)
_pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)]
pixel_values = torch.cat(_pixel_values_list, dim=0)
elif pixel_values.dim() != 4:
# otherwise has to be stacked from list of (num_patches, num_channels, height, width)
raise ValueError(f"pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions")
image_features = self.vision_tower(pixel_values, output_hidden_states=True)
selected_image_feature = image_features.hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
selected_image_feature = selected_image_feature[:, 1:]
elif vision_feature_select_strategy == "full":
selected_image_feature = selected_image_feature
image_features = self.multi_modal_projector(selected_image_feature)
image_features = torch.split(image_features, image_num_patches, dim=0)
return image_features
def get_video_features(
self, pixel_values: torch.FloatTensor, vision_feature_layer: int, vision_feature_select_strategy: str
):
"""
        Obtains video last hidden states from the vision tower and applies multimodal projection.
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, channels, height, width)`)
The tensors corresponding to the input video.
vision_feature_layer (`int`):
The index of the layer to select the vision feature.
vision_feature_select_strategy (`str`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
            video_features (List[`torch.Tensor`]): List of video feature tensors, each containing all the visual features of all patches
            and of shape `(num_videos, video_length, embed_dim)`.
"""
batch_size, frames, channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * frames, channels, height, width)
video_features = self.vision_tower(pixel_values, output_hidden_states=True)
selected_video_features = video_features.hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
selected_video_features = selected_video_features[:, 1:]
elif vision_feature_select_strategy == "full":
selected_video_features = selected_video_features
# Same as image features except that video has pooling layer
video_features = self.vision_resampler(selected_video_features)
video_features = self.multi_modal_projector(video_features)
video_features = torch.split(video_features, frames, dim=0)
return video_features
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
pixel_values_videos: torch.FloatTensor = None,
image_sizes: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[int] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
num_logits_to_keep: int = 0,
) -> Union[Tuple, LlavaNextVideoCausalLMOutputWithPast]:
r"""
Args:
            pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, image_size, image_size)`):
                The tensors corresponding to the input videos. Pixel values can be obtained using
                [`AutoImageProcessor`]. See [`LlavaNextVideoImageProcessor.__call__`] for details. [`LlavaNextVideoProcessor`] uses
                [`LlavaNextVideoImageProcessor`] for processing videos.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
num_logits_to_keep (`int`, *optional*):
Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
`input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
Returns:
Example:
```python
        >>> from PIL import Image
        >>> import requests
        >>> import numpy as np
        >>> import av
        >>> from huggingface_hub import hf_hub_download
        >>> from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`List[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", device_map="auto")
>>> processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
>>> prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:"
>>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset")
>>> container = av.open(video_path)
>>> # sample uniformly 8 frames from the video (model was trained with 32 frames per video, but this video is short)
>>> total_frames = container.streams.video[0].frames
>>> indices = np.arange(0, total_frames, total_frames / 8).astype(int)
>>> clip = read_video_pyav(container, indices)
>>> inputs_video = processor(text=prompt, videos=clip, return_tensors="pt").to(model.device)
>>> # load an image to generate from an image
>>> prompt = "USER:<image>\nWhat is shown in this image? ASSISTANT:"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs_image = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
>>> # Generate from video
>>> generate_ids = model.generate(**inputs_video, max_length=50)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER:\nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and endearing sight of a baby wearing glasses and (...)"
>>> # Generate from image
>>> generate_ids = model.generate(**inputs_image, max_length=30)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER: \nWhat's the content of the image? ASSISTANT: The image shows a red stop sign on a pole, with a traditional Chinese archway (...)"
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self.vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
self.vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if (pixel_values is not None or pixel_values_videos is not None) and inputs_embeds is not None:
raise ValueError(
"You cannot specify both `pixel_values`/`pixel_values_videos` and `inputs_embeds` at the same time, "
"and must specify either one"
)
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None and pixel_values.size(0) > 0:
image_features = self.get_image_features(
pixel_values,
image_sizes,
vision_feature_layer=self.vision_feature_layer,
vision_feature_select_strategy=self.vision_feature_select_strategy,
)
image_features, feature_lens = self.pack_image_features(
image_features,
image_sizes,
self.vision_feature_select_strategy,
image_newline=self.image_newline,
)
n_image_tokens = (input_ids == self.config.image_token_index).sum().item()
n_image_features = image_features.shape[0]
if n_image_tokens != n_image_features:
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
if pixel_values_videos is not None and pixel_values_videos.size(0) > 0:
video_features = self.get_video_features(
pixel_values_videos,
vision_feature_layer=self.vision_feature_layer,
vision_feature_select_strategy=self.vision_feature_select_strategy,
)
video_features = [feature.flatten(0, 1) for feature in video_features]
video_feature_lens = [feature.size(0) for feature in video_features]
video_features = torch.cat(video_features, dim=0)
video_feature_lens = torch.tensor(video_feature_lens, dtype=torch.long, device=video_features.device)
n_video_tokens = (input_ids == self.config.video_token_index).sum().item()
n_video_features = video_features.shape[0]
if n_video_tokens != n_video_features:
raise ValueError(
f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
)
special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1)
special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
video_features = video_features.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, video_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
num_logits_to_keep=num_logits_to_keep,
)
logits = outputs[0]
loss = None
if labels is not None:
# Shift so that tokens < n predict n
if attention_mask is not None:
# we use the input attention mask to shift the logits and labels, because it is 2D.
# we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device)
shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
else:
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return LlavaNextVideoCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
video_hidden_states=video_features if pixel_values_videos is not None else None,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
pixel_values_videos=None,
image_sizes=None,
attention_mask=None,
cache_position=None,
num_logits_to_keep=None,
**kwargs,
):
# Overwritten -- extra custom processing
model_inputs = self.language_model.prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
num_logits_to_keep=num_logits_to_keep,
**kwargs,
)
# If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
# Otherwise we need pixel values to be passed to model
if cache_position[0] == 0:
model_inputs["pixel_values"] = pixel_values
model_inputs["pixel_values_videos"] = pixel_values_videos
model_inputs["image_sizes"] = image_sizes
return model_inputs
|
class_definition
| 9,825 | 27,491 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modular_llava_next_video.py
| null | 5,147 |
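In `get_video_features` above, frames are folded into the batch dimension before the vision tower, pooled by the resampler, projected, and finally split back into per-video chunks with `torch.split(..., frames, dim=0)`. The sketch below traces only that tensor bookkeeping with random stand-ins for the tower/pooler/projector outputs; the sizes (two 8-frame videos, 144 pooled tokens per frame, 4096-dim text hidden size) are illustrative assumptions.

```python
import torch

batch_size, frames, channels, height, width = 2, 8, 3, 336, 336
pixel_values_videos = torch.randn(batch_size, frames, channels, height, width)

# frames are folded into the batch dimension before the vision tower is called
flat = pixel_values_videos.reshape(batch_size * frames, channels, height, width)
print(flat.shape)  # torch.Size([16, 3, 336, 336])

# stand-in for vision tower -> pooler -> projector output: one row per frame
tokens_per_frame, embed_dim = 144, 4096
video_features = torch.randn(batch_size * frames, tokens_per_frame, embed_dim)

# split back into per-video chunks of `frames` rows each
per_video = torch.split(video_features, frames, dim=0)
print(len(per_video), per_video[0].shape)  # 2 torch.Size([8, 144, 4096])
```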
class LlavaNextVideoImageProcessor(BaseImageProcessor):
r"""
Constructs a LLaVa-NeXT-Video video processor. Based on [`CLIPImageProcessor`] with incorporation of processing each video frame.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
image_grid_pinpoints (`List` *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`):
A list of possible resolutions to use for processing high resolution images. The best resolution is selected
based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
            method. Not used for processing videos.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`Dict[str, int]` *optional*, defaults to 224):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values_videos"]
def __init__(
self,
do_resize: bool = True,
size: Dict[str, int] = None,
image_grid_pinpoints: List = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_center_crop: bool = True,
crop_size: Dict[str, int] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
do_convert_rgb: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.image_grid_pinpoints = image_grid_pinpoints
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
# Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize with CLIP->LLaVa
def resize(
self,
image: np.ndarray,
size: Dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if "shortest_edge" in size:
size = size["shortest_edge"]
default_to_square = False
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(
image,
size=size,
default_to_square=default_to_square,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def _preprocess(
self,
images: ImageInput,
do_resize: bool = None,
size: Dict[str, int] = None,
resample: PILImageResampling = None,
do_center_crop: bool = None,
crop_size: int = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_normalize: bool = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
do_convert_rgb: bool = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> Image.Image:
"""
Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
Args:
images (`ImageInput`):
Batch of frames (one video) to preprocess. Expects a batch of frames with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_list_of_images(images)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
return images
def preprocess(
self,
images: VideoInput,
do_resize: bool = None,
size: Dict[str, int] = None,
resample: PILImageResampling = None,
do_center_crop: bool = None,
crop_size: int = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_normalize: bool = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
do_convert_rgb: bool = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Args:
images (`VideoInput`):
Videos to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the video.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the video after resizing. Shortest edge of the video is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the video. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the video.
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the video.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the video by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the video.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Frame mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Frame standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the video to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, param_name="size", default_to_square=False)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
images = make_batched_videos(images)
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# preprocess each video frame by frame
pixel_values = [
self._preprocess(
frames,
do_resize=do_resize,
size=size,
resample=resample,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
input_data_format=input_data_format,
)
for frames in images
]
data = {"pixel_values_videos": pixel_values}
return BatchFeature(data=data, tensor_type=return_tensors)
|
class_definition
| 2,029 | 21,424 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/image_processing_llava_next_video.py
| null | 5,148 |
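A hedged usage sketch for the video image processor above: preprocessing a single 8-frame clip of random `uint8` frames with the default settings (shortest edge 224, 224x224 center crop). The output key is `pixel_values_videos`, and the shape in the comment is the expected result under those defaults.

```python
import numpy as np

from transformers import LlavaNextVideoImageProcessor

processor = LlavaNextVideoImageProcessor()

# one clip of 8 frames in (num_frames, height, width, channels) layout
clip = np.random.randint(0, 255, size=(8, 360, 640, 3), dtype=np.uint8)

batch = processor(clip, return_tensors="pt")
print(batch["pixel_values_videos"].shape)  # expected: torch.Size([1, 8, 3, 224, 224])
```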
class LlavaNextVideoProcessor(ProcessorMixin):
r"""
Constructs a LLaVa-NeXT-Video processor which wraps a LLaVa-NeXT image processor, LLaVa-NeXT-Video video processor and
a LLaMa tokenizer into a single processor.
[`LlavaNextVideoProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`], [`LlavaNextVideoImageProcessor`] and
[`LlamaTokenizerFast`]. See the [`~LlavaNextVideoProcessor.__call__`] and [`~LlavaNextVideoProcessor.decode`] for more information.
Args:
video_processor ([`LlavaNextVideoImageProcessor`], *optional*):
The video processor is a required input.
image_processor ([`LlavaNextImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
chat_template (`str`, *optional*):
Jinja chat template that will be used in tokenizer's `apply_chat_template`
patch_size (`int`, *optional*):
Patch size from the vision tower.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
            Should be the same as in the model's config.
video_token (`str`, *optional*, defaults to `"<video>"`):
Special token used to denote video location.
image_token (`str`, *optional*, defaults to `"<image>"`):
Special token used to denote image location.
num_additional_image_tokens (`int`, *optional*, defaults to 0):
Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other
extra tokens appended, no need to set this arg.
"""
# video and image processor share same args, but have different processing logic
# only image processor config is saved in the hub
attributes = ["video_processor", "image_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"patch_size",
"vision_feature_select_strategy",
"image_token",
"video_token",
"num_additional_image_tokens",
]
image_processor_class = "LlavaNextImageProcessor"
video_processor_class = "LlavaNextVideoImageProcessor"
tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
def __init__(
self,
video_processor=None,
image_processor=None,
tokenizer=None,
chat_template=None,
patch_size=None,
vision_feature_select_strategy=None,
video_token="<video>",
image_token="<image>",
num_additional_image_tokens=0,
**kwargs,
):
self.patch_size = patch_size
self.num_additional_image_tokens = num_additional_image_tokens
self.vision_feature_select_strategy = vision_feature_select_strategy
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
self.video_token = tokenizer.video_token if hasattr(tokenizer, "video_token") else video_token
super().__init__(video_processor, image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
images: ImageInput = None,
videos: VideoInput = None,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: int = None,
return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
) -> BatchFeature:
"""
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. To prepare the video(s),
        this method forwards the `videos` and `kwargs` arguments to LlavaNextVideoImageProcessor's
        [`~LlavaNextVideoImageProcessor.__call__`] if `videos` is not `None`. Please refer to the docstring
        of the above methods for more information.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`, *optional*):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if images is not None:
image_inputs = self.image_processor(images, return_tensors=return_tensors)
else:
image_inputs = {}
if videos is not None:
videos_inputs = self.video_processor(videos, return_tensors=return_tensors)
else:
videos_inputs = {}
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
if image_inputs:
image_sizes = iter(image_inputs["image_sizes"])
height, width = get_image_size(to_numpy_array(image_inputs["pixel_values"][0][0]))
prompt_strings = []
for sample in text:
while self.image_token in sample:
image_size = next(image_sizes)
if not isinstance(image_size, (list, tuple)):
# cast to list to avoid numerical precision errors when calculating unpadding
image_size = image_size.tolist()
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
sample = sample.replace(self.image_token, "<placeholder>" * num_image_tokens, 1)
prompt_strings.append(sample)
text = [sample.replace("<placeholder>", self.image_token) for sample in prompt_strings]
# videos are easier, simply get frames and multiply
if videos_inputs:
one_video = to_numpy_array(videos_inputs.get("pixel_values_videos")[0])
height, width = get_image_size(one_video[0])
num_frames = one_video.shape[0] # frame dim is always after batch dim
# no `self.num_additional_image_tokens` added because video always has a default feature selection strategy
num_image_tokens = (height // self.patch_size) * (width // self.patch_size)
num_video_tokens = num_image_tokens // 4 * num_frames # divide by 4 needed for avg pooling layer
prompt_strings = []
for sample in text:
sample = sample.replace(self.video_token, self.video_token * num_video_tokens)
prompt_strings.append(sample)
text = prompt_strings
text_inputs = self.tokenizer(
text,
return_tensors=return_tensors,
padding=padding,
truncation=truncation,
max_length=max_length,
)
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs})
# Copied from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_number_of_features
def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
image_grid_pinpoints = self.image_processor.image_grid_pinpoints
height_best_resolution, width_best_resolution = select_best_resolution(
[orig_height, orig_width], image_grid_pinpoints
)
scale_height, scale_width = height_best_resolution // height, width_best_resolution // width
patches_height = height // self.patch_size
patches_width = width // self.patch_size
unpadded_features, newline_features = self._get_unpadded_features(
orig_height, orig_width, patches_height, patches_width, scale_height, scale_width
)
# The base patch covers the entire image (+1 for the CLS)
base_features = patches_height * patches_width + self.num_additional_image_tokens
num_image_tokens = unpadded_features + newline_features + base_features
return num_image_tokens
# Copied from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_unpadded_features
def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
"""
Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA
        because it divides each image into patches depending on its resolution. Therefore we need to calculate how many
patches an image is divided into and get the number of features from that.
"""
current_height = patches_height * scale_height
current_width = patches_width * scale_width
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
new_height = (height * current_width) // width
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
new_width = (width * current_height) // height
padding = (current_width - new_width) // 2
current_width -= padding * 2
unpadded_features = current_height * current_width
newline_features = current_height
return (unpadded_features, newline_features)
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
|
class_definition
| 1,168 | 15,080 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/processing_llava_next_video.py
| null | 5,149 |
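The `<video>` placeholder expansion in `__call__` above is simple arithmetic: tokens per frame from the patch grid, divided by 4 for the stride-2 pooling layer, times the number of frames. A worked example with assumed values (336x336 frames, `patch_size=14`, 8 frames):

```python
# illustrative values; patch_size normally comes from the processor/config
height = width = 336
patch_size = 14
num_frames = 8

num_image_tokens = (height // patch_size) * (width // patch_size)  # 24 * 24 = 576
num_video_tokens = num_image_tokens // 4 * num_frames              # // 4 for the avg-pooling layer
print(num_image_tokens, num_video_tokens)                          # 576 1152

# each "<video>" in the prompt is replaced by 1152 copies of the video token before tokenization
```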
class LlavaNextVideoCausalLMOutputWithPast(ModelOutput):
"""
Base class for LlavaNextVideo causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
video_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
video_hidden_states: Optional[torch.FloatTensor] = None
|
class_definition
| 2,174 | 5,151 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modeling_llava_next_video.py
| null | 5,150 |
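Like other `ModelOutput` subclasses, the dataclass above can be read either by attribute or by tuple indexing, and fields left as `None` are dropped from the tuple view. A small sketch with random stand-in tensors (it assumes a `transformers` version that ships the LLaVA-NeXT-Video model files):

```python
import torch

from transformers.models.llava_next_video.modeling_llava_next_video import (
    LlavaNextVideoCausalLMOutputWithPast,
)

out = LlavaNextVideoCausalLMOutputWithPast(
    logits=torch.randn(1, 10, 32000),
    video_hidden_states=torch.randn(8, 144, 4096),
)
print(out.logits.shape)  # torch.Size([1, 10, 32000])
print(out[0].shape)      # same tensor: `loss` is None, so `logits` comes first in the tuple view
print(out.loss)          # None (no labels were passed)
```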
class LlavaNextVideoPooler(nn.Module):
def __init__(self, config):
super().__init__()
mode = config.spatial_pool_mode
stride = config.spatial_pool_stride
out_channels = getattr(config, "spatial_pool_out_channels", config.vision_config.hidden_size)
self.image_size = (config.vision_config.image_size // config.vision_config.patch_size) ** 2
if mode == "average":
self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride)
elif mode == "max":
self.pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
elif mode == "conv":
self.pool = nn.Conv2d(
in_channels=config.vision_config.hidden_size,
out_channels=out_channels,
kernel_size=stride,
stride=stride,
)
else:
raise ValueError(f"Unknown pooling mode: {mode}. Has to be one of [`average`, `max`, `conv`]")
    def forward(self, image_features):
        # image_features has shape (batch_size, num_patches, dim); the flattened patch
        # sequence is laid out as a square grid before spatial pooling
        ori_width = int(math.sqrt(image_features.shape[1] * self.image_size // self.image_size))
        ori_height = int(ori_width * self.image_size // self.image_size)
batch_size, _, dim = image_features.shape
image_features_spatial = image_features.view(batch_size, ori_height, ori_height, dim).permute(0, 3, 1, 2)
image_features_spatial_pool = self.pool(image_features_spatial)
return image_features_spatial_pool.flatten(2).transpose(1, 2).contiguous()
|
class_definition
| 5,154 | 6,643 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modeling_llava_next_video.py
| null | 5,151 |
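For completeness, a shape check of the `"conv"` pooling mode in the class above, where the channel dimension can change to `spatial_pool_out_channels`. The input below already has the `(batch, dim, height, width)` layout produced by the permute in `forward`; all sizes are illustrative assumptions.

```python
import torch
from torch import nn

hidden_size, out_channels, stride = 1024, 896, 2  # assumed config values
pool = nn.Conv2d(in_channels=hidden_size, out_channels=out_channels, kernel_size=stride, stride=stride)

frames = torch.randn(8, hidden_size, 24, 24)  # 8 frames, 24x24 patch grid
print(pool(frames).shape)  # torch.Size([8, 896, 12, 12])
```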
class LlavaNextVideoPreTrainedModel(PreTrainedModel):
config_class = LlavaNextVideoConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["LlavaNextVideoVisionAttention"]
_skip_keys_device_placement = "past_key_values"
_supports_cache_class = True
_supports_flash_attn_2 = True
_supports_sdpa = True
def _init_weights(self, module):
# important: this ported version of LlavaNextVideo isn't meant for training from scratch - only
# inference and fine-tuning - so the proper init weights code has been removed - the original codebase
# https://github.com/haotian-liu/LLaVA/tree/main/llava_next_video should serve for that purpose
std = (
self.config.initializer_range
if hasattr(self.config, "initializer_range")
else self.config.text_config.initializer_range
)
if hasattr(module, "class_embedding"):
module.class_embedding.data.normal_(mean=0.0, std=std)
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
class_definition
| 7,730 | 9,167 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modeling_llava_next_video.py
| null | 5,152 |
class LlavaNextVideoMultiModalProjector(nn.Module):
def __init__(self, config: LlavaNextVideoConfig):
super().__init__()
self.linear_1 = nn.Linear(
config.vision_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(
config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
)
def forward(self, image_features):
hidden_states = self.linear_1(image_features)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
|
class_definition
| 9,170 | 9,901 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modeling_llava_next_video.py
| null | 5,153 |
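A quick shape sketch for the projector above: two linear layers with an activation in between map vision-tower features into the language model's hidden size. The sizes and the GELU stand-in for `ACT2FN[config.projector_hidden_act]` are assumptions for illustration.

```python
import torch
from torch import nn

vision_hidden, text_hidden = 1024, 4096  # illustrative dims
linear_1 = nn.Linear(vision_hidden, text_hidden)
act = nn.GELU()  # stand-in for ACT2FN[config.projector_hidden_act]
linear_2 = nn.Linear(text_hidden, text_hidden)

image_features = torch.randn(576, vision_hidden)  # one image's patch features
projected = linear_2(act(linear_1(image_features)))
print(projected.shape)  # torch.Size([576, 4096])
```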
class LlavaNextVideoForConditionalGeneration(LlavaNextVideoPreTrainedModel, GenerationMixin):
def __init__(
self,
config: LlavaNextVideoConfig,
):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = LlavaNextVideoMultiModalProjector(config)
embed_std = 1 / math.sqrt(config.text_config.hidden_size)
self.image_newline = nn.Parameter(torch.randn(config.text_config.hidden_size, dtype=self.dtype) * embed_std)
self.vocab_size = config.text_config.vocab_size
self.language_model = AutoModelForCausalLM.from_config(config.text_config)
if self.language_model._tied_weights_keys is not None:
self._tied_weights_keys = [f"language_model.{k}" for k in self.language_model._tied_weights_keys]
self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
self._padding_side = "left" # set it to left by default, user can use setter to change padding_sides
self.vision_resampler = LlavaNextVideoPooler(config)
self.post_init()
@property
def padding_side(self):
return self._padding_side
@padding_side.setter
def padding_side(self, padding_side: str):
if padding_side not in ["left", "right"]:
raise ValueError(f"{padding_side} is not `left` or `right`.")
self._padding_side = padding_side
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_output_embeddings(self):
return self.language_model.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
self.language_model.set_output_embeddings(new_embeddings)
def set_decoder(self, decoder):
self.language_model.set_decoder(decoder)
def get_decoder(self):
return self.language_model.get_decoder()
def _merge_input_ids_with_image_features(
self,
image_features,
feature_lens,
inputs_embeds,
input_ids,
attention_mask,
position_ids=None,
labels=None,
image_token_index=None,
ignore_index=-100,
):
"""
        Merge input_ids with image features into final embeddings
Args:
image_features (`torch.Tensor` of shape `(all_feature_lens, embed_dim)`):
All vision vectors of all images in the batch
feature_lens (`torch.LongTensor` of shape `(num_images)`):
The length of visual embeddings of each image as stacked in `image_features`
inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, embed_dim)`):
Token embeddings before merging with visual embeddings
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Input_ids of tokens, possibly filled with image token
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Mask to avoid performing attention on padding token indices.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*)
                Labels need to be recalculated to support training (if provided)
image_token_index (`int`, *optional*)
Token id used to indicate the special "image" token. Defaults to `config.image_token_index`
ignore_index (`int`, *optional*)
Value that is used to pad `labels` and will be ignored when calculated loss. Default: -100.
Returns:
final_embedding, final_attention_mask, position_ids, final_labels
Explanation:
each image has variable length embeddings, with length specified by feature_lens
image_features is concatenation of all visual embed vectors
task: fill each <image> with the correct number of visual embeddings
Example:
X (5 patches), Y (3 patches), Z (8)
X, Y are in the same sequence (in-context learning)
if right padding
input_ids: [
a b c d e f X g h i j k Y l m
o p q r Z s t u v _ _ _ _ _ _
]
input_ids should be: [
a b c d e f X X X X X g h i j k Y Y Y l m
o p q r Z Z Z Z Z Z Z Z s t u v _ _ _ _ _
]
labels should be: [
a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
o p q r _ _ _ _ _ _ _ _ s t u v _ _ _ _ _
]
elif left padding
input_ids: [
a b c d e f X g h i j k Y l m
_ _ _ _ _ _ o p q r Z s t u v
]
input_ids should be: [
a b c d e f X X X X X g h i j k Y Y Y l m
_ _ _ _ _ o p q r Z Z Z Z Z Z Z Z s t u v
]
labels should be: [
a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
_ _ _ _ _ o p q r _ _ _ _ _ _ _ _ s t u v
]
Edge cases:
            * If tokens are the same but image token sizes are different, then we cannot infer left or right padding
```python
cat_img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
chart_img = Image.open(requests.get("https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true", stream=True).raw)
prompts = [
"[INST] <image>\nWhat is shown in this image? [/INST]",
"[INST] <image>\nWhat is shown in this image? [/INST]",
]
inputs = processor(prompts, [chart_img, cat_img], return_tensors='pt', padding=True).to("cuda")
chart_img has 2634 tokens, while cat_img has 2340 tokens
```
input_ids: [
a b c d X g h
i j Y k l m n
]
        where X is 3 tokens while Y is 5, this means after merging
if left-padding (batched generation)
input_ids should be: [
_ _ a b c d X X X g h
i j Y Y Y Y Y k l m n
]
elif (right padding) (training)
input_ids should be: [
a b c d X X X g h _ _
i j Y Y Y Y Y k l m n
]
"""
image_token_index = image_token_index if image_token_index is not None else self.config.image_token_index
ignore_index = ignore_index if ignore_index is not None else self.config.ignore_index
if self.training and self.padding_side == "left":
logger.warning_once(
"Padding side is set to 'left' but the model is in training mode. For training "
"it is recommended to set `model.padding_side='right' and `processor.tokenizer.padding_side='right'`. "
"If that's intended, ignore this warning"
)
if not self.training and self.padding_side == "right":
logger.warning_once(
"Padding side is set to 'right' but the model is in inference mode. For correct "
"generation results, please set `model.padding_side='left'` and `processor.tokenizer.padding_side='left'`. "
"If that's intended, ignore this warning"
)
with torch.no_grad():
# ! in llava 1.6, number of patches is variable
num_images = feature_lens.size(0)
num_image_features, embed_dim = image_features.shape
if feature_lens.sum() != num_image_features:
raise ValueError(f"{feature_lens=} / {feature_lens.sum()} != {image_features.shape=}")
batch_size = input_ids.shape[0]
_left_padding = torch.any(attention_mask[:, 0] == 0)
_right_padding = torch.any(attention_mask[:, -1] == 0)
left_padding = self.padding_side == "left"
if batch_size > 1:
if _left_padding and _right_padding:
raise ValueError(f"both side of attention_mask has zero, invalid. {attention_mask}")
elif _right_padding and left_padding:
left_padding = False
elif _left_padding and not left_padding:
left_padding = True
# Whether to turn off right padding
# 1. Create a mask to know where special image tokens are
special_image_token_mask = input_ids == image_token_index
# special_image_token_mask: [bsz, seqlen]
num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
# num_special_image_tokens: [bsz]
# Reserve for padding of num_images
total_num_special_image_tokens = torch.sum(special_image_token_mask)
if total_num_special_image_tokens != num_images:
raise ValueError(
f"Number of image tokens in input_ids ({total_num_special_image_tokens}) different from num_images ({num_images})."
)
# Compute the maximum embed dimension
# max_image_feature_lens is max_feature_lens per batch
feature_lens = feature_lens.to(input_ids.device)
feature_lens_batch = feature_lens.split(num_special_image_tokens.tolist(), dim=0)
feature_lens_batch_sum = torch.tensor([x.sum() for x in feature_lens_batch], device=input_ids.device)
embed_sequence_lengths = (
(attention_mask == 1).long().sum(-1) - num_special_image_tokens + feature_lens_batch_sum
)
max_embed_dim = embed_sequence_lengths.max()
batch_indices, non_image_indices = torch.where((input_ids != image_token_index) & (attention_mask == 1))
# 2. Compute the positions where text should be written
# Calculate new positions for text tokens in merged image-text sequence.
# `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images` text tokens.
# `torch.cumsum` computes how each image token shifts subsequent text token positions.
# - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
# ! instead of special_image_token_mask * (num_image_patches - 1)
# special_image_token_mask * (num_feature_len - 1)
special_image_token_mask = special_image_token_mask.long()
special_image_token_mask[special_image_token_mask == 1] = feature_lens - 1
new_token_positions = torch.cumsum((special_image_token_mask + 1), -1) - 1
if left_padding:
# shift right token positions so that they are ending at the same number
# the below here was incorrect? new_token_positions += new_token_positions[:, -1].max() - new_token_positions[:, -1:]
new_token_positions += max_embed_dim - 1 - new_token_positions[:, -1:]
text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
# 3. Create the full embedding, already padded to the maximum position
final_embedding = torch.zeros(
batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
)
final_attention_mask = torch.zeros(
batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
)
final_input_ids = torch.full(
(batch_size, max_embed_dim), self.pad_token_id, dtype=input_ids.dtype, device=inputs_embeds.device
)
# In case the Vision model or the Language model has been offloaded to CPU, we need to manually
# set the corresponding tensors into their correct target device.
target_device = inputs_embeds.device
batch_indices, non_image_indices, text_to_overwrite = (
batch_indices.to(target_device),
non_image_indices.to(target_device),
text_to_overwrite.to(target_device),
)
attention_mask = attention_mask.to(target_device)
input_ids = input_ids.to(target_device)
# 4. Fill the embeddings based on the mask. If we have ["hey" "<image>", "how", "are"]
# we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
final_input_ids[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_image_indices]
final_labels = None
if labels is not None:
labels = labels.to(target_device)
final_labels = torch.full_like(final_attention_mask, ignore_index).to(torch.long)
final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]
# 5. Fill the embeddings corresponding to the images. Anything that is not `text_positions` needs filling (#29835)
with torch.no_grad():
image_to_overwrite = torch.full(
(batch_size, max_embed_dim), True, dtype=torch.bool, device=inputs_embeds.device
)
image_to_overwrite[batch_indices, text_to_overwrite] = False
embed_indices = torch.arange(max_embed_dim).unsqueeze(0).to(target_device)
embed_indices = embed_indices.expand(batch_size, max_embed_dim)
embed_seq_lens = embed_sequence_lengths[:, None].to(target_device)
if left_padding:
# exclude padding on the left
max_embed_dim = max_embed_dim.to(target_device)
val = (max_embed_dim - embed_indices) <= embed_seq_lens
else:
# exclude padding on the right
val = embed_indices < embed_seq_lens
image_to_overwrite &= val
if image_to_overwrite.sum() != num_image_features:
raise ValueError(
f"{image_to_overwrite.sum()=} != {num_image_features=} The input provided to the model are wrong. "
f"The number of image tokens is {torch.sum(special_image_token_mask)} while"
f" the number of image given to the model is {num_images}. "
f"This prevents correct indexing and breaks batch generation."
)
final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
final_attention_mask |= image_to_overwrite
position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)
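        # e.g. a left-padded row whose final_attention_mask is [0, 0, 1, 1, 1] gives cumsum - 1 = [-1, -1, 0, 1, 2],
        # and masked_fill_ sets the padded slots to 1, yielding position_ids [1, 1, 0, 1, 2]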
return final_embedding, final_attention_mask, position_ids, final_labels, final_input_ids
def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):
"""
Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors.
Args:
image_features (`List[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`)
                List of image feature tensors; each contains all the visual features of all patches.
            image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
                Actual image size of each image (H, W).
vision_feature_select_strategy (`str`)
The feature selection strategy used to select the vision feature from the vision backbone.
image_newline (`torch.Tensor` of shape `(embed_dim)`)
New line embedding vector.
Returns:
image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`)
feature_lens (`List[int]`)
token length of each image in image_features
"""
new_image_features = []
feature_lens = []
for image_idx, image_feature in enumerate(image_features):
if image_feature.shape[0] > 1:
base_image_feature = image_feature[0]
image_feature = image_feature[1:]
height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
if vision_feature_select_strategy == "default":
expected_num_patches = height * width
elif vision_feature_select_strategy == "full":
expected_num_patches = height * width + 1
if expected_num_patches != base_image_feature.shape[0]:
raise ValueError("The number of patches is not consistent with the image size.")
num_patch_height, num_patch_width = get_anyres_image_grid_shape(
image_sizes[image_idx],
self.config.image_grid_pinpoints,
self.config.vision_config.image_size,
)
image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
image_feature = image_feature.flatten(1, 2).flatten(2, 3)
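                # Shape walkthrough of the reshaping above: after `view` the tensor is
                # (num_patch_height, num_patch_width, height, width, embed_dim); `permute(4, 0, 2, 1, 3)` moves the
                # channel dim first, and the two `flatten` calls merge the patch grid with the per-patch grid,
                # giving (embed_dim, num_patch_height * height, num_patch_width * width)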
image_feature = unpad_image(image_feature, image_sizes[image_idx])
if image_newline is not None:
image_feature = torch.cat(
(
image_feature,
image_newline[:, None, None]
.expand(*image_feature.shape[:-1], 1)
.to(image_feature.device, image_feature.dtype),
),
dim=-1,
)
image_feature = image_feature.flatten(1, 2).transpose(0, 1)
image_feature = torch.cat((base_image_feature, image_feature), dim=0)
else:
image_feature = image_feature[0]
if image_newline is not None:
image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)
new_image_features.append(image_feature)
feature_lens.append(image_feature.size(0))
image_features = torch.cat(new_image_features, dim=0)
feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features.device)
return image_features, feature_lens
def get_image_features(
self,
pixel_values: torch.FloatTensor,
image_sizes: torch.Tensor,
vision_feature_layer: int,
vision_feature_select_strategy: str,
):
"""
        Obtains image last hidden states from the vision tower and applies the multimodal projection.
Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, channels, height, width)`)
                The tensors corresponding to the input images.
            image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
                Actual image size of each image (H, W).
vision_feature_layer (`int`):
The index of the layer to select the vision feature.
vision_feature_select_strategy (`str`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
            image_features (List[`torch.Tensor`]): List of image feature tensors, each containing all the visual features of all patches,
                of shape `(num_patches, image_length, embed_dim)`.
"""
# ! infer image_num_patches from image_sizes
image_num_patches = [
image_size_to_num_patches(
image_size=imsize,
grid_pinpoints=self.config.image_grid_pinpoints,
patch_size=self.config.vision_config.image_size,
)
for imsize in image_sizes
]
if pixel_values.dim() == 5:
# stacked if input is (batch_size, num_patches, num_channels, height, width)
_pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)]
pixel_values = torch.cat(_pixel_values_list, dim=0)
elif pixel_values.dim() != 4:
# otherwise has to be stacked from list of (num_patches, num_channels, height, width)
raise ValueError(f"pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions")
image_features = self.vision_tower(pixel_values, output_hidden_states=True)
selected_image_feature = image_features.hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
selected_image_feature = selected_image_feature[:, 1:]
elif vision_feature_select_strategy == "full":
selected_image_feature = selected_image_feature
image_features = self.multi_modal_projector(selected_image_feature)
image_features = torch.split(image_features, image_num_patches, dim=0)
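        # e.g. with image_num_patches = [5, 9] (hypothetical), the stacked (14, image_length, embed_dim) tensor is
        # split back into a 5-patch tensor and a 9-patch tensor, one entry per input image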
return image_features
@add_start_docstrings_to_model_forward(LLAVA_NEXT_VIDEO_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=LlavaNextVideoCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
pixel_values_videos: torch.FloatTensor = None,
image_sizes: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[int] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
num_logits_to_keep: int = 0,
) -> Union[Tuple, LlavaNextVideoCausalLMOutputWithPast]:
r"""
Args:
            pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, image_size, image_size)`):
The tensors corresponding to the input videos. Pixel values can be obtained using
                [`AutoImageProcessor`]. See [`LlavaNextVideoVideoProcessor.__call__`] for details. [`LlavaNextVideoProcessor`] uses
                [`LlavaNextVideoVideoProcessor`] for processing videos.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
num_logits_to_keep (`int`, *optional*):
Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
`input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
Returns:
Example:
```python
        >>> from PIL import Image
        >>> import requests
        >>> import numpy as np
        >>> import av
        >>> from huggingface_hub import hf_hub_download
        >>> from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`List[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", device_map="auto")
>>> processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
>>> prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:"
>>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset")
>>> container = av.open(video_path)
>>> # sample uniformly 8 frames from the video (model was trained with 32 frames per video, but this video is short)
>>> total_frames = container.streams.video[0].frames
>>> indices = np.arange(0, total_frames, total_frames / 8).astype(int)
>>> clip = read_video_pyav(container, indices)
>>> inputs_video = processor(text=prompt, videos=clip, return_tensors="pt").to(model.device)
>>> # load an image to generate from an image
>>> prompt = "USER:<image>\nWhat is shown in this image? ASSISTANT:"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs_image = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
>>> # Generate from video
>>> generate_ids = model.generate(**inputs_video, max_length=50)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER:\nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and endearing sight of a baby wearing glasses and (...)"
>>> # Generate from image
>>> generate_ids = model.generate(**inputs_image, max_length=30)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER: \nWhat's the content of the image? ASSISTANT: The image shows a red stop sign on a pole, with a traditional Chinese archway (...)"
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self.vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
self.vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if (pixel_values is not None or pixel_values_videos is not None) and inputs_embeds is not None:
raise ValueError(
"You cannot specify both `pixel_values`/`pixel_values_videos` and `inputs_embeds` at the same time, "
"and must specify either one"
)
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None and pixel_values.size(0) > 0:
image_features = self.get_image_features(
pixel_values,
image_sizes,
vision_feature_layer=self.vision_feature_layer,
vision_feature_select_strategy=self.vision_feature_select_strategy,
)
image_features, feature_lens = self.pack_image_features(
image_features,
image_sizes,
self.vision_feature_select_strategy,
image_newline=self.image_newline,
)
n_image_tokens = (input_ids == self.config.image_token_index).sum().item()
n_image_features = image_features.shape[0]
if n_image_tokens != n_image_features:
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
if pixel_values_videos is not None and pixel_values_videos.size(0) > 0:
video_features = self.get_video_features(
pixel_values_videos,
vision_feature_layer=self.vision_feature_layer,
vision_feature_select_strategy=self.vision_feature_select_strategy,
)
video_features = [feature.flatten(0, 1) for feature in video_features]
video_feature_lens = [feature.size(0) for feature in video_features]
video_features = torch.cat(video_features, dim=0)
video_feature_lens = torch.tensor(video_feature_lens, dtype=torch.long, device=video_features.device)
n_video_tokens = (input_ids == self.config.video_token_index).sum().item()
n_video_features = video_features.shape[0]
if n_video_tokens != n_video_features:
raise ValueError(
f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
)
special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1)
special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
video_features = video_features.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, video_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
num_logits_to_keep=num_logits_to_keep,
)
logits = outputs[0]
loss = None
if labels is not None:
# Shift so that tokens < n predict n
if attention_mask is not None:
# we use the input attention mask to shift the logits and labels, because it is 2D.
# we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device)
shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
else:
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return LlavaNextVideoCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
video_hidden_states=video_features if pixel_values_videos is not None else None,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
pixel_values_videos=None,
image_sizes=None,
attention_mask=None,
cache_position=None,
num_logits_to_keep=None,
**kwargs,
):
# Overwritten -- extra custom processing
model_inputs = self.language_model.prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
num_logits_to_keep=num_logits_to_keep,
**kwargs,
)
# If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
# Otherwise we need pixel values to be passed to model
if cache_position[0] == 0:
model_inputs["pixel_values"] = pixel_values
model_inputs["pixel_values_videos"] = pixel_values_videos
model_inputs["image_sizes"] = image_sizes
return model_inputs
def get_video_features(
self, pixel_values: torch.FloatTensor, vision_feature_layer: int, vision_feature_select_strategy: str
):
"""
        Obtains video last hidden states from the vision tower and applies the multimodal projection.
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, channels, height, width)`)
The tensors corresponding to the input video.
vision_feature_layer (`int`):
The index of the layer to select the vision feature.
vision_feature_select_strategy (`str`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
            video_features (List[`torch.Tensor`]): List of video feature tensors, each containing all the visual features of all patches,
                of shape `(num_videos, video_length, embed_dim)`.
"""
batch_size, frames, channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * frames, channels, height, width)
video_features = self.vision_tower(pixel_values, output_hidden_states=True)
selected_video_features = video_features.hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
selected_video_features = selected_video_features[:, 1:]
elif vision_feature_select_strategy == "full":
selected_video_features = selected_video_features
        # Same as image features, except that video features additionally go through a pooling layer (the vision resampler)
video_features = self.vision_resampler(selected_video_features)
video_features = self.multi_modal_projector(video_features)
video_features = torch.split(video_features, frames, dim=0)
return video_features
|
class_definition
| 19,858 | 56,613 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/llava_next_video/modeling_llava_next_video.py
| null | 5,154 |
class FalconMambaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FalconMambaModel`]. It is used to instantiate a FALCON_MAMBA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the FALCON_MAMBA
[tiiuae/falcon-mamba-7b](https://huggingface.co/tiiuae/falcon-mamba-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50280):
            Vocabulary size of the FALCON_MAMBA model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`FalconMambaModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
state_size (`int`, *optional*, defaults to 16): shape of the state space latents.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the model.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 0):
The id of the beginning of sentence token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 0):
The id of the end of sentence token in the vocabulary.
expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.
use_bias (`bool`, *optional*, defaults to `False`):
Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block
use_conv_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use bias in the convolution layer of the mixer block.
hidden_act (`str`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.1):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
residual_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model
time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
time_step_scale (`float`, *optional*, defaults to 1.0):
            Scale used to scale `dt_proj.bias`.
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj.bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_init_scheme (`str`, *optional*, defaults to `"random"`):
Init scheme used for `dt_proj.weight`. Should be one of `["random","uniform"]`
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
Whether or not to rescale `out_proj` weights when initializing.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the cache should be used.
use_mambapy (`bool`, *optional*, defaults to `False`):
            Determines the fallback strategy during training if the CUDA-based official implementation of FalconMamba is not available. If `True`, the mamba.py (mambapy) implementation is used. If `False`, the naive and slower implementation is used. Consider switching to the naive version if memory is limited.
mixer_rms_eps (`float`, *optional*, defaults to 1e-06):
The RMS norm epsilon value that is used in the Mixer RMS norm for B, C and dt states.
Example:
```python
>>> from transformers import FalconMambaConfig, FalconMambaModel
>>> # Initializing a FalconMamba configuration
>>> configuration = FalconMambaConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = FalconMambaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "falcon_mamba"
def __init__(
self,
vocab_size=50280,
hidden_size=768,
state_size=16,
num_hidden_layers=32,
layer_norm_epsilon=1e-5,
pad_token_id=0,
bos_token_id=0,
eos_token_id=0,
expand=2,
conv_kernel=4,
use_bias=False,
use_conv_bias=True,
hidden_act="silu",
initializer_range=0.1,
residual_in_fp32=True,
time_step_rank="auto",
time_step_scale=1.0,
time_step_min=0.001,
time_step_max=0.1,
time_step_init_scheme="random",
time_step_floor=1e-4,
rescale_prenorm_residual=False,
use_cache=True,
use_mambapy=False,
mixer_rms_eps=1e-6,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.state_size = state_size
self.num_hidden_layers = num_hidden_layers
self.layer_norm_epsilon = layer_norm_epsilon
self.conv_kernel = conv_kernel
self.expand = expand
self.intermediate_size = int(expand * self.hidden_size)
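        # e.g. with the defaults expand=2 and hidden_size=768, intermediate_size = 1536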
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.use_bias = use_bias
self.use_conv_bias = use_conv_bias
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank
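        # e.g. with the default hidden_size=768, "auto" resolves to math.ceil(768 / 16) = 48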
self.time_step_scale = time_step_scale
self.time_step_min = time_step_min
self.time_step_max = time_step_max
self.time_step_init_scheme = time_step_init_scheme
self.time_step_floor = time_step_floor
self.rescale_prenorm_residual = rescale_prenorm_residual
self.residual_in_fp32 = residual_in_fp32
self.use_cache = use_cache
self.use_mambapy = use_mambapy
self.mixer_rms_eps = mixer_rms_eps
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
|
class_definition
| 774 | 7,727 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/configuration_falcon_mamba.py
| null | 5,155 |
class FalconMambaMixer(nn.Module):
"""
    Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see FalconMamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between FalconMamba and the linear time invariant S4,
and is why FalconMamba is called **selective** state spaces)
"""
def __init__(self, config: FalconMambaConfig, layer_idx: int):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.ssm_state_size = config.state_size
self.conv_kernel_size = config.conv_kernel
self.intermediate_size = config.intermediate_size
self.time_step_rank = int(config.time_step_rank)
self.layer_idx = layer_idx
self.use_conv_bias = config.use_conv_bias
self.conv1d = nn.Conv1d(
in_channels=self.intermediate_size,
out_channels=self.intermediate_size,
bias=config.use_conv_bias,
kernel_size=config.conv_kernel,
groups=self.intermediate_size,
padding=config.conv_kernel - 1,
)
self.activation = config.hidden_act
self.act = ACT2FN[config.hidden_act]
self.use_mambapy = config.use_mambapy
# projection of the input hidden states
self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias)
        # selective projection used to make dt, B and C input dependent
self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
# time step projection (discretization)
self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)
# S4D real initialization. These are not discretized!
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
A = A.expand(self.intermediate_size, -1).contiguous()
self.A_log = nn.Parameter(torch.log(A))
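        # A starts out as the row [1, 2, ..., ssm_state_size] repeated for every channel and is stored in log
        # space; the forward pass recovers the (negative) continuous-time A via -torch.exp(self.A_log)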
self.D = nn.Parameter(torch.ones(self.intermediate_size))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
self.use_bias = config.use_bias
# Triton expects to pass RMS weights even if they are non learnable, thus we need to create these weights here
self.register_buffer(
"b_c_rms", torch.nn.Parameter(torch.ones(self.ssm_state_size), requires_grad=False), persistent=False
)
self.register_buffer(
"dt_rms", torch.nn.Parameter(torch.ones(self.intermediate_size), requires_grad=False), persistent=False
)
self.rms_eps = config.mixer_rms_eps
if not is_fast_path_available:
if self.use_mambapy:
if is_mambapy_available():
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
" is None. Falling back to the mamba.py backend. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d"
)
else:
raise ImportError(
"use_mambapy is set to True but the mambapy package is not installed. To install it follow https://github.com/alxndrTL/mamba.py."
)
else:
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
" is None. Falling back to the sequential implementation of Mamba, as use_mambapy is set to False. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d. For the mamba.py backend, follow https://github.com/alxndrTL/mamba.py."
)
def cuda_kernels_forward(
self,
hidden_states: torch.Tensor,
cache_params: Optional[MambaCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
# 1. Gated MLP's linear projection
projected_states = self.in_proj(hidden_states).transpose(1, 2)
if self.training and cache_params is None: # Doesn't support outputting the states -> used for training
contextualized_states = mamba_inner_fn(
projected_states,
self.conv1d.weight,
self.conv1d.bias if self.use_conv_bias else None,
self.x_proj.weight,
self.dt_proj.weight,
self.out_proj.weight,
self.out_proj.bias.float() if self.use_bias else None,
-torch.exp(self.A_log.float()),
None, # input-dependent B
None, # input-dependent C
self.D.float(),
delta_bias=self.dt_proj.bias.float(),
delta_softplus=True,
b_rms_weight=self.b_c_rms,
c_rms_weight=self.b_c_rms,
dt_rms_weight=self.dt_rms,
b_c_dt_rms_eps=self.rms_eps,
)
else:
hidden_states, gate = projected_states.chunk(2, dim=1)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 2. Convolution sequence transformation
conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
if cache_params is not None and cache_position[0] > 0:
hidden_states = causal_conv1d_update(
hidden_states.squeeze(-1),
cache_params.conv_states[self.layer_idx],
conv_weights,
self.conv1d.bias,
self.activation,
)
hidden_states = hidden_states.unsqueeze(-1)
else:
if cache_params is not None:
conv_states = nn.functional.pad(
hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)
)
cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
hidden_states = causal_conv1d_fn(
hidden_states, conv_weights, self.conv1d.bias, activation=self.activation
)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 3. State Space Model sequence transformation
# 3.a. input varying initialization of time_step, B and C
ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
time_step, B, C = torch.split(
ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
)
B = rms_forward(B, variance_epsilon=self.rms_eps)
C = rms_forward(C, variance_epsilon=self.rms_eps)
time_step = rms_forward(time_step, variance_epsilon=self.rms_eps)
# In case the model has been quantized, we need a hack to properly call the `nn.Linear` module
# at the price of a small overhead.
if hasattr(self.config, "_pre_quantization_dtype"):
discrete_time_step = (self.dt_proj(time_step) - self.dt_proj.bias).transpose(1, 2)
else:
discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)
A = -torch.exp(self.A_log.float())
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, "bias") else None
if cache_params is not None and cache_position[0] > 0:
scan_outputs = selective_state_update(
cache_params.ssm_states[self.layer_idx],
hidden_states[..., 0],
discrete_time_step[..., 0],
A,
B[:, 0],
C[:, 0],
self.D,
gate[..., 0],
time_proj_bias,
dt_softplus=True,
).unsqueeze(-1)
else:
scan_outputs, ssm_state = selective_scan_fn(
hidden_states,
discrete_time_step,
A,
B.transpose(1, 2),
C.transpose(1, 2),
self.D.float(),
gate,
time_proj_bias,
delta_softplus=True,
return_last_state=True,
)
if ssm_state is not None and cache_params is not None:
cache_params.update_ssm_state(self.layer_idx, ssm_state)
# 4. Final linear projection
contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
return contextualized_states
def slow_forward(
self,
input_states,
cache_params: Optional[MambaCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
# 1. Gated MLP's linear projection
projected_states = self.in_proj(input_states).transpose(1, 2) # [batch, 2 * intermediate_size, seq_len]
hidden_states, gate = projected_states.chunk(2, dim=1)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 2. Convolution sequence transformation
if cache_params is not None:
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
ssm_state = ssm_state.to(hidden_states.device)
# use `cache_position.shape[0]` to check whether we are in prefill
# stage, it's equivalent to check `cache_position[0] == 0`, which
# breaks dynamo fullgraph constraints
if cache_position is not None and cache_position.shape[0] == self.conv_kernel_size:
conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.update_conv_state(self.layer_idx, conv_state, cache_position)
hidden_states = self.act(
self.conv1d(hidden_states)[..., :seq_len]
) # [batch, intermediate_size, seq_len]
else:
conv_state = cache_params.update_conv_state(self.layer_idx, hidden_states, cache_position)
hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
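                # Single-token decoding: the depthwise convolution reduces to a per-channel dot product between
                # the rolled conv_state (the last conv_kernel inputs) and that channel's kernel weights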
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = (
self.act(hidden_states).to(dtype).unsqueeze(-1)
) # [batch, intermediate_size, 1] : decoding
else:
ssm_state = torch.zeros(
(batch_size, self.intermediate_size, self.ssm_state_size), device=hidden_states.device, dtype=dtype
)
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len]
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 3. State Space Model sequence transformation
# 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
time_step, B, C = torch.split(
ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
)
B = rms_forward(B, variance_epsilon=self.rms_eps)
C = rms_forward(C, variance_epsilon=self.rms_eps)
time_step = rms_forward(time_step, variance_epsilon=self.rms_eps)
discrete_time_step = self.dt_proj(time_step) # [batch, seq_len, intermediate_size]
discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(
1, 2
) # [batch, intermediate_size, seq_len]
# 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
A = -torch.exp(self.A_log.float()) # [intermediate_size, ssm_state_size]
discrete_A = torch.exp(
A[None, :, None, :] * discrete_time_step[:, :, :, None]
) # [batch, intermediate_size, seq_len, ssm_state_size]
discrete_B = (
discrete_time_step[:, :, :, None] * B[:, None, :, :].float()
) # [batch, intermediate_size, seq_len, ssm_state_size]
deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
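        # Rough intuition for the discretization above: discrete_A = exp(∆·A) is the zero-order-hold
        # discretization of the state matrix, while discrete_B ≈ ∆·B uses a simple Euler approximation,
        # so each recurrence step below computes ssm_state ← exp(∆·A)·ssm_state + ∆·B·x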
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
if self.use_mambapy and self.training and cache_params is None:
hs = pscan(
discrete_A.transpose(1, 2), deltaB_u.transpose(1, 2)
) # [batch, seq_len, intermediate_size, ssm_state_size]
scan_output = (hs @ C.unsqueeze(-1)).squeeze(3).transpose(1, 2) # [batch, intermediate_size, seq_len]
scan_output = scan_output + hidden_states * self.D[None, :, None]
scan_output = scan_output * self.act(gate)
else:
scan_outputs = []
for i in range(seq_len):
ssm_state = (
discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
) # [batch, intermediate_size, ssm_state]
scan_output = torch.matmul(
ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)
) # [batch, intermediate_size, 1]
scan_outputs.append(scan_output[:, :, 0])
scan_output = torch.stack(scan_outputs, dim=-1) # [batch, intermediate_size, seq_len]
scan_output = scan_output + (hidden_states * self.D[None, :, None])
scan_output = scan_output * self.act(gate)
if cache_params is not None:
cache_params.update_ssm_state(self.layer_idx, ssm_state)
# 4. Final linear projection
contextualized_states = self.out_proj(scan_output.transpose(1, 2)) # [batch, seq_len, hidden_size]
return contextualized_states
# Copied from transformers.models.mamba.modeling_mamba.MambaMixer.forward
def forward(
self,
hidden_states,
cache_params: Optional[MambaCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
if is_fast_path_available and "cuda" in self.x_proj.weight.device.type and not torch._dynamo.is_compiling():
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
return self.slow_forward(hidden_states, cache_params, cache_position, attention_mask)
|
class_definition
| 2,915 | 18,337 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
| null | 5,156 |
class FalconMambaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
FalconMambaRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def extra_repr(self):
return f"{self.weight.shape[0]}, eps={self.variance_epsilon}"
# Ignore copy
def forward(self, hidden_states):
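        # rms_forward is assumed to implement the usual RMS normalization,
        # hidden_states / sqrt(mean(hidden_states**2, dim=-1) + eps), without a learnable weight;
        # the learnable scale is applied here on top of it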
return self.weight.to(hidden_states.device) * rms_forward(
hidden_states, variance_epsilon=self.variance_epsilon
)
|
class_definition
| 18,432 | 19,032 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
| null | 5,157 |
class FalconMambaBlock(nn.Module):
def __init__(self, config, layer_idx):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.residual_in_fp32 = config.residual_in_fp32
self.norm = FalconMambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.mixer = FalconMambaMixer(config, layer_idx=layer_idx)
def forward(
self,
hidden_states,
cache_params: Optional[MambaCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
residual = hidden_states
hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
hidden_states = self.mixer(
hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask
)
hidden_states = residual + hidden_states
return hidden_states
|
class_definition
| 19,154 | 20,205 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
| null | 5,158 |
class FalconMambaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = FalconMambaConfig
base_model_prefix = "backbone"
_no_split_modules = ["FalconMambaBlock", "FalconMambaMixer"]
supports_gradient_checkpointing = True
_is_stateful = True
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, FalconMambaMixer):
module.A_log._no_weight_decay = True
module.D._no_weight_decay = True
dt_init_std = self.config.time_step_rank**-0.5 * self.config.time_step_scale
if self.config.time_step_init_scheme == "constant":
nn.init.constant_(module.dt_proj.weight, dt_init_std)
elif self.config.time_step_init_scheme == "random":
nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std)
dt = torch.exp(
torch.rand(self.config.intermediate_size)
* (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
+ math.log(self.config.time_step_min)
).clamp(min=self.config.time_step_floor)
            # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
inv_dt = dt + torch.log(-torch.expm1(-dt))
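            # Sanity check of the inversion: softplus(inv_dt) = log(1 + exp(dt) * (1 - exp(-dt))) = log(exp(dt)) = dt,
            # so softplus(dt_proj.bias) recovers the sampled dt values at initialization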
with torch.no_grad():
module.dt_proj.bias.copy_(inv_dt)
module.dt_proj.bias._no_reinit = True
if isinstance(module, nn.Linear):
if module.bias is not None:
if not getattr(module.bias, "_no_reinit", False):
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=self.config.initializer_range)
if self.config.rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
# We need to reinit p since this code could be called multiple times
# Having just p *= scale would repeatedly scale it down
nn.init.kaiming_uniform_(p, a=math.sqrt(5))
with torch.no_grad():
p /= math.sqrt(self.config.num_hidden_layers)
|
class_definition
| 20,308 | 23,405 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
| null | 5,159 |
class FalconMambaOutput(ModelOutput):
"""
Class for the FALCONMAMBA model outputs.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
cache_params (`MambaCache`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
            Includes both the state space model state matrices after the selective scan, and the convolutional states.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
cache_params: Optional[MambaCache] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
class_definition
| 23,558 | 24,837 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
| null | 5,160 |
class FalconMambaCausalLMOutput(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
cache_params (`MambaCache`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
            Includes both the state space model state matrices after the selective scan, and the convolutional states.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
cache_params: Optional[MambaCache] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
class_definition
| 24,979 | 26,511 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
| null | 5,161 |
class FalconMambaModel(FalconMambaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.layers = nn.ModuleList(
[FalconMambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
self.norm_f = FalconMambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings = new_embeddings
@add_start_docstrings_to_model_forward(FALCONMAMBA_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=FalconMambaOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
cache_params: Optional[MambaCache] = None,
use_cache: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
) -> Union[Tuple, FalconMambaOutput]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
if self.gradient_checkpointing and self.training and use_cache:
use_cache = False
if use_cache:
if cache_params is None:
cache_params = MambaCache(
self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
)
cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
elif cache_position is None:
# cases when we do manual forward instead of using `model.generate` which will initiate
# `cache_position` and makes sure it is not None, throw error here instead of doing some
# hack to conjecture the current cache position
raise ValueError(
"You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, "
"you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will "
"be initialized for you automatically"
)
else:
cache_params = None
hidden_states = inputs_embeds
all_hidden_states = () if output_hidden_states else None
for mixer_block in self.layers:
if self.gradient_checkpointing and self.training:
hidden_states = self._gradient_checkpointing_func(
mixer_block.__call__, hidden_states, cache_params, cache_position, attention_mask
)
else:
hidden_states = mixer_block(
hidden_states,
cache_params=cache_params,
cache_position=cache_position,
attention_mask=attention_mask,
)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = self.norm_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
return FalconMambaOutput(
last_hidden_state=hidden_states,
cache_params=cache_params if use_cache else None,
hidden_states=all_hidden_states,
)
|
class_definition
| 29,265 | 33,743 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
| null | 5,162 |
class FalconMambaForCausalLM(FalconMambaPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.backbone = FalconMambaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_input_embeddings(self):
return self.backbone.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
return self.backbone.set_input_embeddings(new_embeddings)
def _update_model_kwargs_for_generation(
self, outputs: ModelOutput, model_kwargs: Dict[str, Any], num_new_tokens: int = 1, **kwargs
) -> Dict[str, Any]:
model_kwargs["cache_params"] = outputs.get("cache_params", None)
if (
model_kwargs.get("use_cache", True)
and "cache_position" in model_kwargs
and model_kwargs["cache_position"] is not None
):
model_kwargs["cache_position"] = model_kwargs["cache_position"][-1:] + num_new_tokens
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
return model_kwargs
def prepare_inputs_for_generation(
self,
input_ids,
inputs_embeds=None,
use_cache=None,
cache_params: Optional[MambaCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
**kwargs,
):
        # Overwritten -- uses `cache_params` as opposed to `past_key_values`
if use_cache:
# `cache_position` should have been initialized in `generate`
if cache_position is None:
raise ValueError(
"`cache_position` should not be None as it should have been initialized in "
"`model.generate`, you are responsible for passing in a valid `cache_position` if "
"you are calling `prepare_inputs_for_generation` directly with `use_cache=True`"
)
if cache_position[0] > 0:
input_ids = input_ids[:, -1].unsqueeze(-1)
if attention_mask is not None:
attention_mask = None
else:
# we initialize the `cache_position` to full size of `conv_states` at prefill stage
# considering padding will be applied when input length is shorter, and truncation
# will be applied when it is longer, so it will be equivalent to always have it match
# the length of `cache_params.conv_states`, which is `config.conv_kernel`
cache_position = torch.arange(0, self.config.conv_kernel, device=input_ids.device)
if inputs_embeds is not None and cache_params is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids.contiguous()}
model_inputs.update(
{
"cache_params": cache_params,
"use_cache": use_cache,
"cache_position": cache_position,
"attention_mask": attention_mask,
}
)
return model_inputs
@add_start_docstrings_to_model_forward(FALCONMAMBA_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=FalconMambaCausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_params: Optional[MambaCache] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs, # for now we need this for generation
) -> Union[Tuple, FalconMambaCausalLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
falcon_mamba_outputs = self.backbone(
input_ids,
cache_params=cache_params,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
use_cache=use_cache,
cache_position=cache_position,
attention_mask=attention_mask,
)
hidden_states = falcon_mamba_outputs[0]
logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
loss = None
if labels is not None:
# move labels to correct device to enable model parallelism
labels = labels.to(logits.device)
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (logits,) + falcon_mamba_outputs[1:]
return ((loss,) + output) if loss is not None else output
return FalconMambaCausalLMOutput(
loss=loss,
logits=logits,
cache_params=falcon_mamba_outputs.cache_params,
hidden_states=falcon_mamba_outputs.hidden_states,
)
|
class_definition
| 34,122 | 40,556 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
| null | 5,163 |
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`FocalNetModel`]. It is used to instantiate a
FocalNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the FocalNet
[microsoft/focalnet-tiny](https://huggingface.co/microsoft/focalnet-tiny) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch in the embeddings layer.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
use_conv_embed (`bool`, *optional*, defaults to `False`):
            Whether to use convolutional embedding. The authors noted that using convolutional embedding usually
            improves the performance, but it's not used by default.
hidden_sizes (`List[int]`, *optional*, defaults to `[192, 384, 768, 768]`):
Dimensionality (hidden size) at each stage.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth (number of layers) of each stage in the encoder.
focal_levels (`list(int)`, *optional*, defaults to `[2, 2, 2, 2]`):
Number of focal levels in each layer of the respective stages in the encoder.
focal_windows (`list(int)`, *optional*, defaults to `[3, 3, 3, 3]`):
Focal window size in each layer of the respective stages in the encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
use_layerscale (`bool`, *optional*, defaults to `False`):
Whether to use layer scale in the encoder.
layerscale_value (`float`, *optional*, defaults to 0.0001):
The initial value of the layer scale.
use_post_layernorm (`bool`, *optional*, defaults to `False`):
Whether to use post layer normalization in the encoder.
use_post_layernorm_in_modulation (`bool`, *optional*, defaults to `False`):
Whether to use post layer normalization in the modulation layer.
normalize_modulator (`bool`, *optional*, defaults to `False`):
Whether to normalize the modulator.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`List[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`List[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import FocalNetConfig, FocalNetModel
>>> # Initializing a FocalNet microsoft/focalnet-tiny style configuration
>>> configuration = FocalNetConfig()
>>> # Initializing a model (with random weights) from the microsoft/focalnet-tiny style configuration
>>> model = FocalNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "focalnet"
def __init__(
self,
image_size=224,
patch_size=4,
num_channels=3,
embed_dim=96,
use_conv_embed=False,
hidden_sizes=[192, 384, 768, 768],
depths=[2, 2, 6, 2],
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
hidden_act="gelu",
mlp_ratio=4.0,
hidden_dropout_prob=0.0,
drop_path_rate=0.1,
use_layerscale=False,
layerscale_value=1e-4,
use_post_layernorm=False,
use_post_layernorm_in_modulation=False,
normalize_modulator=False,
initializer_range=0.02,
layer_norm_eps=1e-5,
encoder_stride=32,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.use_conv_embed = use_conv_embed
self.hidden_sizes = hidden_sizes
self.depths = depths
self.focal_levels = focal_levels
self.focal_windows = focal_windows
self.hidden_act = hidden_act
self.mlp_ratio = mlp_ratio
self.hidden_dropout_prob = hidden_dropout_prob
self.drop_path_rate = drop_path_rate
self.use_layerscale = use_layerscale
self.layerscale_value = layerscale_value
self.use_post_layernorm = use_post_layernorm
self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
self.normalize_modulator = normalize_modulator
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.encoder_stride = encoder_stride
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
|
class_definition
| 885 | 8,025 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/configuration_focalnet.py
| null | 5,164 |
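For reference, a minimal sketch of the `out_features`/`out_indices` alignment performed at the end of the constructor above. The `align_outputs` helper below is a simplified stand-in written for illustration, not the library's `get_aligned_output_features_output_indices` utility; it only shows the defaulting behavior (last stage when both arguments are unset, otherwise derive the missing one from the other).

```python
from typing import List, Optional, Tuple


def align_outputs(
    out_features: Optional[List[str]],
    out_indices: Optional[List[int]],
    stage_names: List[str],
) -> Tuple[List[str], List[int]]:
    # Default to the last stage when neither argument is given,
    # otherwise derive the missing argument from the other one.
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices


stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, 5)]
print(align_outputs(None, None, stage_names))                  # (['stage4'], [4])
print(align_outputs(["stage1", "stage3"], None, stage_names))  # (['stage1', 'stage3'], [1, 3])
```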
class FocalNetEncoderOutput(ModelOutput):
"""
FocalNet encoder's outputs, with potential hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
class_definition
| 1,687 | 3,139 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,165 |
class FocalNetModelOutput(ModelOutput):
"""
FocalNet model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
class_definition
| 3,153 | 4,883 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,166 |
class FocalNetMaskedImageModelingOutput(ModelOutput):
"""
FocalNet masked image model outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
            Masked image modeling loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
reconstruction: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
class_definition
| 4,897 | 6,488 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,167 |
class FocalNetImageClassifierOutput(ModelOutput):
"""
FocalNet outputs for image classification.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
class_definition
| 6,502 | 8,139 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,168 |
class FocalNetEmbeddings(nn.Module):
"""
Construct the patch embeddings and layernorm. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = FocalNetPatchEmbeddings(
config=config,
image_size=config.image_size,
patch_size=config.patch_size,
num_channels=config.num_channels,
embed_dim=config.embed_dim,
use_conv_embed=config.use_conv_embed,
is_stem=True,
)
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
) -> Tuple[torch.Tensor]:
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
embeddings = self.dropout(embeddings)
return embeddings, output_dimensions
|
class_definition
| 8,142 | 9,740 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,169 |
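A standalone sketch of the mask-token substitution in the forward pass above, with illustrative shapes: positions flagged in `bool_masked_pos` are replaced by the learned mask token before dropout.

```python
import torch

batch_size, seq_len, embed_dim = 2, 16, 96
embeddings = torch.randn(batch_size, seq_len, embed_dim)
mask_token = torch.zeros(1, 1, embed_dim)
bool_masked_pos = torch.randint(0, 2, (batch_size, seq_len)).bool()

# broadcast the single mask token over all positions, then blend by the boolean mask
mask_tokens = mask_token.expand(batch_size, seq_len, -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
print(embeddings.shape)  # torch.Size([2, 16, 96])
```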
class FocalNetPatchEmbeddings(nn.Module):
def __init__(
self,
config,
image_size,
patch_size,
num_channels,
embed_dim,
add_norm=False,
use_conv_embed=False,
is_stem=False,
):
super().__init__()
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
if use_conv_embed:
# if we choose to use conv embedding, then we treat the stem and non-stem differently
if is_stem:
kernel_size = 7
padding = 2
stride = 4
else:
kernel_size = 3
padding = 1
stride = 2
self.projection = nn.Conv2d(
num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
)
else:
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
if add_norm:
self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
else:
self.norm = None
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
_, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
# pad the input to be divisible by self.patch_size, if needed
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
if self.norm is not None:
embeddings = self.norm(embeddings)
return embeddings, output_dimensions
|
class_definition
| 9,743 | 12,657 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,170 |
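A small sketch of `maybe_pad` on its own: `torch.nn.functional.pad` pads the last dimension first, so `(left, right)` pads the width and `(0, 0, top, bottom)` pads the height of an NCHW tensor until both are divisible by the patch size (toy shapes below).

```python
import torch
import torch.nn.functional as F

patch_size = (4, 4)
pixel_values = torch.randn(1, 3, 30, 31)  # height and width not divisible by 4
_, _, height, width = pixel_values.shape

if width % patch_size[1] != 0:
    pixel_values = F.pad(pixel_values, (0, patch_size[1] - width % patch_size[1]))
if height % patch_size[0] != 0:
    pixel_values = F.pad(pixel_values, (0, 0, 0, patch_size[0] - height % patch_size[0]))

print(pixel_values.shape)  # torch.Size([1, 3, 32, 32])
```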
class FocalNetDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return "p={}".format(self.drop_prob)
|
class_definition
| 13,901 | 14,383 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,171 |
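The `drop_path` helper called above is defined elsewhere in the file and is not shown in this snippet; the sketch below is the standard stochastic-depth formulation such a helper is expected to follow (one Bernoulli draw per sample, rescaled by the keep probability), written for illustration rather than copied from the source.

```python
import torch


def drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # one random draw per sample, broadcast over the remaining dimensions
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    return x.div(keep_prob) * random_tensor
```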
class FocalNetModulation(nn.Module):
def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0):
super().__init__()
self.dim = dim
self.focal_window = config.focal_windows[index]
self.focal_level = config.focal_levels[index]
self.focal_factor = focal_factor
self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation
self.normalize_modulator = config.normalize_modulator
self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias)
self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias)
self.activation = nn.GELU()
self.projection_out = nn.Linear(dim, dim)
self.projection_dropout = nn.Dropout(projection_dropout)
self.focal_layers = nn.ModuleList()
self.kernel_sizes = []
for k in range(self.focal_level):
kernel_size = self.focal_factor * k + self.focal_window
self.focal_layers.append(
nn.Sequential(
nn.Conv2d(
dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False
),
nn.GELU(),
)
)
self.kernel_sizes.append(kernel_size)
if self.use_post_layernorm_in_modulation:
self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps)
def forward(self, hidden_state):
"""
Args:
hidden_state:
Input features with shape of (batch_size, height, width, num_channels)
"""
num_channels = hidden_state.shape[-1]
# pre linear projection
x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
q, ctx, self.gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)
        # context aggregation
ctx_all = 0
for level in range(self.focal_level):
ctx = self.focal_layers[level](ctx)
ctx_all = ctx_all + ctx * self.gates[:, level : level + 1]
ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
ctx_all = ctx_all + ctx_global * self.gates[:, self.focal_level :]
# normalize context
if self.normalize_modulator:
ctx_all = ctx_all / (self.focal_level + 1)
# focal modulation
self.modulator = self.projection_context(ctx_all)
x_out = q * self.modulator
x_out = x_out.permute(0, 2, 3, 1).contiguous()
if self.use_post_layernorm_in_modulation:
x_out = self.layernorm(x_out)
        # post linear projection
x_out = self.projection_out(x_out)
x_out = self.projection_dropout(x_out)
return x_out
|
class_definition
| 14,386 | 17,220 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,172 |
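A shape-only sketch of the modulation input projection above: a single linear layer packs the query, the context, and `focal_level + 1` gate maps into one tensor, which is split channel-wise after moving to NCHW (toy sizes below).

```python
import torch

batch_size, height, width, dim, focal_level = 1, 8, 8, 96, 2
projected = torch.randn(batch_size, height, width, 2 * dim + focal_level + 1)

x = projected.permute(0, 3, 1, 2)  # -> (batch, channels, height, width)
q, ctx, gates = torch.split(x, (dim, dim, focal_level + 1), dim=1)
print(q.shape, ctx.shape, gates.shape)
# torch.Size([1, 96, 8, 8]) torch.Size([1, 96, 8, 8]) torch.Size([1, 3, 8, 8])
```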
class FocalNetMlp(nn.Module):
def __init__(self, config, in_features, hidden_features=None, out_features=None, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.activation = ACT2FN[config.hidden_act]
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, hidden_state):
hidden_state = self.fc1(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.drop(hidden_state)
hidden_state = self.fc2(hidden_state)
hidden_state = self.drop(hidden_state)
return hidden_state
|
class_definition
| 17,223 | 17,996 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,173 |
class FocalNetLayer(nn.Module):
r"""Focal Modulation Network layer (block).
Args:
config (`FocalNetConfig`):
Model config.
index (`int`):
Layer index.
dim (`int`):
Number of input channels.
input_resolution (`Tuple[int]`):
            Input resolution.
drop_path (`float`, *optional*, defaults to 0.0):
Stochastic depth rate.
"""
def __init__(self, config, index, dim, input_resolution, drop_path=0.0):
super().__init__()
self.config = config
# layer-specific attributes
self.dim = dim
self.input_resolution = input_resolution
# general attributes
self.drop = config.hidden_dropout_prob
self.use_post_layernorm = config.use_post_layernorm
self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.modulation = FocalNetModulation(
config=config,
index=index,
dim=dim,
projection_dropout=self.drop,
)
self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
mlp_hidden_dim = int(dim * config.mlp_ratio)
self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop)
self.gamma_1 = 1.0
self.gamma_2 = 1.0
if config.use_layerscale:
self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True)
self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True)
def forward(self, hidden_state, input_dimensions):
height, width = input_dimensions
batch_size, _, num_channels = hidden_state.shape
shortcut = hidden_state
# Focal Modulation
hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state)
hidden_state = hidden_state.view(batch_size, height, width, num_channels)
hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels)
hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state)
# FFN
hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state)
hidden_state = hidden_state + self.drop_path(
self.gamma_2
* (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state)))
)
return hidden_state
|
class_definition
| 17,999 | 20,584 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,174 |
class FocalNetStage(nn.Module):
def __init__(self, config, index, input_resolution):
super().__init__()
self.config = config
self.num_stages = len(config.depths)
embed_dim = [config.embed_dim * (2**i) for i in range(self.num_stages)]
dim = embed_dim[index]
out_dim = embed_dim[index + 1] if (index < self.num_stages - 1) else None
downsample = FocalNetPatchEmbeddings if (index < self.num_stages - 1) else None
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
drop_path = dpr[sum(config.depths[:index]) : sum(config.depths[: index + 1])]
self.layers = nn.ModuleList(
[
FocalNetLayer(
config=config,
index=index,
dim=dim,
input_resolution=input_resolution,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
)
for i in range(config.depths[index])
]
)
if downsample is not None:
self.downsample = downsample(
config=config,
image_size=input_resolution,
patch_size=2,
num_channels=dim,
embed_dim=out_dim,
add_norm=True,
use_conv_embed=config.use_conv_embed,
is_stem=False,
)
else:
self.downsample = None
self.pointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int]) -> Tuple[torch.Tensor]:
height, width = input_dimensions
for layer_module in self.layers:
hidden_states = layer_module(hidden_states, input_dimensions)
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height, width = input_dimensions
hidden_states = hidden_states.transpose(1, 2).reshape(
hidden_states_before_downsampling.shape[0], -1, height, width
)
hidden_states, output_dimensions = self.downsample(hidden_states)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
return stage_outputs
|
class_definition
| 20,587 | 23,013 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,175 |
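A quick sketch of the stochastic-depth decay rule used in the constructor above: one `linspace` over all blocks of all stages is sliced per stage, so deeper blocks receive larger drop-path rates (default depths shown).

```python
import torch

depths = [2, 2, 6, 2]
drop_path_rate = 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

for index in range(len(depths)):
    stage_rates = dpr[sum(depths[:index]) : sum(depths[: index + 1])]
    print(f"stage {index}: {[round(rate, 3) for rate in stage_rates]}")
```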
class FocalNetEncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_stages = len(config.depths)
self.config = config
self.stages = nn.ModuleList(
[
FocalNetStage(
config=config,
index=i_layer,
input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
)
for i_layer in range(self.num_stages)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: Tuple[int, int],
output_hidden_states: Optional[bool] = False,
output_hidden_states_before_downsampling: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple, FocalNetEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, stage_module in enumerate(self.stages):
if self.gradient_checkpointing and self.training:
stage_outputs = self._gradient_checkpointing_func(
stage_module.__call__,
hidden_states,
input_dimensions,
)
else:
stage_outputs = stage_module(hidden_states, input_dimensions)
hidden_states = stage_outputs[0]
hidden_states_before_downsampling = stage_outputs[1]
output_dimensions = stage_outputs[2]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
if output_hidden_states and output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states_before_downsampling.shape
# rearrange b (h w) c -> b c h w
# here we use the original (not downsampled) height and width
reshaped_hidden_state = hidden_states_before_downsampling.view(
batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and not output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return FocalNetEncoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
reshaped_hidden_states=all_reshaped_hidden_states,
)
|
class_definition
| 23,016 | 26,686 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,176 |
class FocalNetPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = FocalNetConfig
base_model_prefix = "focalnet"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["FocalNetStage"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
class_definition
| 26,797 | 27,764 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,177 |
class FocalNetModel(FocalNetPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
super().__init__(config)
self.config = config
self.num_stages = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=FocalNetModelOutput,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, FocalNetModelOutput]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs = self.encoder(
embedding_output,
input_dimensions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return FocalNetModelOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
)
|
class_definition
| 29,155 | 32,111 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,178 |
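A minimal forward pass with randomly initialized weights (a sketch; the printed shapes assume the default 224x224 configuration with four stages, where the final hidden size is 96 * 2**3 = 768 over a 7x7 grid).

```python
import torch
from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig()
model = FocalNetModel(config)
model.eval()
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = model(pixel_values)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 49, 768])
print(outputs.pooler_output.shape)      # torch.Size([1, 768])
```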
class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True)
self.num_stages = len(config.depths)
num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
self.decoder = nn.Sequential(
nn.Conv2d(
in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
),
nn.PixelShuffle(config.encoder_stride),
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FocalNetMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, FocalNetMaskedImageModelingOutput]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, FocalNetConfig, FocalNetForMaskedImageModeling
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-base-simmim-window6-192")
>>> config = FocalNetConfig()
>>> model = FocalNetForMaskedImageModeling(config)
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
>>> # create random boolean mask of shape (batch_size, num_patches)
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
>>> loss, reconstructed_pixel_values = outputs.loss, outputs.logits
>>> list(reconstructed_pixel_values.shape)
[1, 3, 192, 192]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.focalnet(
pixel_values,
bool_masked_pos=bool_masked_pos,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
# Reshape to (batch_size, num_channels, height, width)
sequence_output = sequence_output.transpose(1, 2)
batch_size, num_channels, sequence_length = sequence_output.shape
height = width = math.floor(sequence_length**0.5)
sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
# Reconstruct pixel values
reconstructed_pixel_values = self.decoder(sequence_output)
masked_im_loss = None
if bool_masked_pos is not None:
size = self.config.image_size // self.config.patch_size
bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
mask = (
bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
.repeat_interleave(self.config.patch_size, 2)
.unsqueeze(1)
.contiguous()
)
reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
if not return_dict:
output = (reconstructed_pixel_values,) + outputs[2:]
return ((masked_im_loss,) + output) if masked_im_loss is not None else output
return FocalNetMaskedImageModelingOutput(
loss=masked_im_loss,
reconstruction=reconstructed_pixel_values,
hidden_states=outputs.hidden_states,
reshaped_hidden_states=outputs.reshaped_hidden_states,
)
|
class_definition
| 32,561 | 37,005 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,179 |
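A standalone sketch of how the patch-level boolean mask is upsampled to pixel resolution before weighting the per-pixel L1 reconstruction loss. The numbers are illustrative, following the 192-pixel example in the docstring with a patch size of 4.

```python
import torch

image_size, patch_size = 192, 4
patches_per_side = image_size // patch_size  # 48
bool_masked_pos = torch.randint(0, 2, (1, patches_per_side**2)).bool()

mask = (
    bool_masked_pos.reshape(-1, patches_per_side, patches_per_side)
    .repeat_interleave(patch_size, 1)
    .repeat_interleave(patch_size, 2)
    .unsqueeze(1)
)
print(mask.shape)  # torch.Size([1, 1, 192, 192])
```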
class FocalNetForImageClassification(FocalNetPreTrainedModel):
# Copied from transformers.models.swin.modeling_swin.SwinForImageClassification.__init__ with Swin->FocalNet, swin->focalnet
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.focalnet = FocalNetModel(config)
# Classifier head
self.classifier = (
nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT,
output_type=FocalNetImageClassifierOutput,
config_class=_CONFIG_FOR_DOC,
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, FocalNetImageClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.focalnet(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return FocalNetImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
reshaped_hidden_states=outputs.reshaped_hidden_states,
)
|
class_definition
| 37,208 | 40,659 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,180 |
class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin):
def __init__(self, config: FocalNetConfig):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embed_dim] + config.hidden_sizes
self.focalnet = FocalNetModel(config)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values: torch.Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> BackboneOutput:
"""
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
>>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf")
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True)
hidden_states = outputs.reshaped_hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
|
class_definition
| 40,800 | 43,125 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/focalnet/modeling_focalnet.py
| null | 5,181 |
class TFResNetConvLayer(keras.layers.Layer):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: int = 1,
activation: str = "relu",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pad_value = kernel_size // 2
self.conv = keras.layers.Conv2D(
out_channels, kernel_size=kernel_size, strides=stride, padding="valid", use_bias=False, name="convolution"
)
# Use same default momentum and epsilon as PyTorch equivalent
self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
self.activation = ACT2FN[activation] if activation is not None else keras.layers.Activation("linear")
self.in_channels = in_channels
self.out_channels = out_channels
def convolution(self, hidden_state: tf.Tensor) -> tf.Tensor:
# Pad to match that done in the PyTorch Conv2D model
height_pad = width_pad = (self.pad_value, self.pad_value)
hidden_state = tf.pad(hidden_state, [(0, 0), height_pad, width_pad, (0, 0)])
hidden_state = self.conv(hidden_state)
return hidden_state
def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_state = self.convolution(hidden_state)
hidden_state = self.normalization(hidden_state, training=training)
hidden_state = self.activation(hidden_state)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "conv", None) is not None:
with tf.name_scope(self.conv.name):
self.conv.build([None, None, None, self.in_channels])
if getattr(self, "normalization", None) is not None:
with tf.name_scope(self.normalization.name):
self.normalization.build([None, None, None, self.out_channels])
|
class_definition
| 1,633 | 3,624 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,182 |
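A sketch of the manual padding used in `convolution` above: a symmetric `kernel_size // 2` pad on the height and width of an NHWC tensor, mirroring PyTorch's `Conv2d(padding=kernel_size // 2)`, before the `padding="valid"` convolution (toy shapes below).

```python
import tensorflow as tf

kernel_size = 3
pad = kernel_size // 2
hidden_state = tf.random.normal((1, 8, 8, 4))  # NHWC
hidden_state = tf.pad(hidden_state, [(0, 0), (pad, pad), (pad, pad), (0, 0)])
print(hidden_state.shape)  # (1, 10, 10, 4)
```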
class TFResNetEmbeddings(keras.layers.Layer):
"""
ResNet Embeddings (stem) composed of a single aggressive convolution.
"""
def __init__(self, config: ResNetConfig, **kwargs) -> None:
super().__init__(**kwargs)
self.embedder = TFResNetConvLayer(
config.num_channels,
config.embedding_size,
kernel_size=7,
stride=2,
activation=config.hidden_act,
name="embedder",
)
self.pooler = keras.layers.MaxPool2D(pool_size=3, strides=2, padding="valid", name="pooler")
self.num_channels = config.num_channels
def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
_, _, _, num_channels = shape_list(pixel_values)
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
hidden_state = pixel_values
hidden_state = self.embedder(hidden_state)
hidden_state = tf.pad(hidden_state, [[0, 0], [1, 1], [1, 1], [0, 0]])
hidden_state = self.pooler(hidden_state)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "embedder", None) is not None:
with tf.name_scope(self.embedder.name):
self.embedder.build(None)
if getattr(self, "pooler", None) is not None:
with tf.name_scope(self.pooler.name):
self.pooler.build(None)
|
class_definition
| 3,627 | 5,273 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,183 |
class TFResNetShortCut(keras.layers.Layer):
"""
ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
downsample the input using `stride=2`.
"""
def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs) -> None:
super().__init__(**kwargs)
self.convolution = keras.layers.Conv2D(
out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
)
# Use same default momentum and epsilon as PyTorch equivalent
self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
self.in_channels = in_channels
self.out_channels = out_channels
def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_state = x
hidden_state = self.convolution(hidden_state)
hidden_state = self.normalization(hidden_state, training=training)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "convolution", None) is not None:
with tf.name_scope(self.convolution.name):
self.convolution.build([None, None, None, self.in_channels])
if getattr(self, "normalization", None) is not None:
with tf.name_scope(self.normalization.name):
self.normalization.build([None, None, None, self.out_channels])
|
class_definition
| 5,276 | 6,782 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,184 |
class TFResNetBasicLayer(keras.layers.Layer):
"""
    A classic ResNet's residual layer composed of two `3x3` convolutions.
"""
def __init__(
self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", **kwargs
) -> None:
super().__init__(**kwargs)
should_apply_shortcut = in_channels != out_channels or stride != 1
self.conv1 = TFResNetConvLayer(in_channels, out_channels, stride=stride, name="layer.0")
self.conv2 = TFResNetConvLayer(out_channels, out_channels, activation=None, name="layer.1")
self.shortcut = (
TFResNetShortCut(in_channels, out_channels, stride=stride, name="shortcut")
if should_apply_shortcut
else keras.layers.Activation("linear", name="shortcut")
)
self.activation = ACT2FN[activation]
def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
residual = hidden_state
hidden_state = self.conv1(hidden_state, training=training)
hidden_state = self.conv2(hidden_state, training=training)
residual = self.shortcut(residual, training=training)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "conv1", None) is not None:
with tf.name_scope(self.conv1.name):
self.conv1.build(None)
if getattr(self, "conv2", None) is not None:
with tf.name_scope(self.conv2.name):
self.conv2.build(None)
if getattr(self, "shortcut", None) is not None:
with tf.name_scope(self.shortcut.name):
self.shortcut.build(None)
|
class_definition
| 6,785 | 8,603 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,185 |
class TFResNetBottleNeckLayer(keras.layers.Layer):
"""
    A classic ResNet's bottleneck layer composed of three convolutions (`1x1`, `3x3`, `1x1`).
The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3`
convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
stride: int = 1,
activation: str = "relu",
reduction: int = 4,
**kwargs,
) -> None:
super().__init__(**kwargs)
should_apply_shortcut = in_channels != out_channels or stride != 1
reduces_channels = out_channels // reduction
self.conv0 = TFResNetConvLayer(in_channels, reduces_channels, kernel_size=1, name="layer.0")
self.conv1 = TFResNetConvLayer(reduces_channels, reduces_channels, stride=stride, name="layer.1")
self.conv2 = TFResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None, name="layer.2")
self.shortcut = (
TFResNetShortCut(in_channels, out_channels, stride=stride, name="shortcut")
if should_apply_shortcut
else keras.layers.Activation("linear", name="shortcut")
)
self.activation = ACT2FN[activation]
def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
residual = hidden_state
hidden_state = self.conv0(hidden_state, training=training)
hidden_state = self.conv1(hidden_state, training=training)
hidden_state = self.conv2(hidden_state, training=training)
residual = self.shortcut(residual, training=training)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "conv0", None) is not None:
with tf.name_scope(self.conv0.name):
self.conv0.build(None)
if getattr(self, "conv1", None) is not None:
with tf.name_scope(self.conv1.name):
self.conv1.build(None)
if getattr(self, "conv2", None) is not None:
with tf.name_scope(self.conv2.name):
self.conv2.build(None)
if getattr(self, "shortcut", None) is not None:
with tf.name_scope(self.shortcut.name):
self.shortcut.build(None)
|
class_definition
| 8,606 | 11,102 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,186 |
class TFResNetStage(keras.layers.Layer):
"""
A ResNet stage composed of stacked layers.
"""
def __init__(
self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
) -> None:
super().__init__(**kwargs)
layer = TFResNetBottleNeckLayer if config.layer_type == "bottleneck" else TFResNetBasicLayer
layers = [layer(in_channels, out_channels, stride=stride, activation=config.hidden_act, name="layers.0")]
layers += [
layer(out_channels, out_channels, activation=config.hidden_act, name=f"layers.{i + 1}")
for i in range(depth - 1)
]
self.stage_layers = layers
def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
for layer in self.stage_layers:
hidden_state = layer(hidden_state, training=training)
return hidden_state
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "stage_layers", None) is not None:
for layer in self.stage_layers:
with tf.name_scope(layer.name):
layer.build(None)
|
class_definition
| 11,105 | 12,327 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,187 |
class TFResNetEncoder(keras.layers.Layer):
def __init__(self, config: ResNetConfig, **kwargs) -> None:
super().__init__(**kwargs)
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages = [
TFResNetStage(
config,
config.embedding_size,
config.hidden_sizes[0],
stride=2 if config.downsample_in_first_stage else 1,
depth=config.depths[0],
name="stages.0",
)
]
for i, (in_channels, out_channels, depth) in enumerate(
zip(config.hidden_sizes, config.hidden_sizes[1:], config.depths[1:])
):
self.stages.append(TFResNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i + 1}"))
def call(
self,
hidden_state: tf.Tensor,
output_hidden_states: bool = False,
return_dict: bool = True,
training: bool = False,
) -> TFBaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state, training=training)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "stages", None) is not None:
for layer in self.stages:
with tf.name_scope(layer.name):
layer.build(None)
|
class_definition
| 12,330 | 14,279 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,188 |
class TFResNetPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ResNetConfig
base_model_prefix = "resnet"
main_input_name = "pixel_values"
@property
def input_signature(self):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
|
class_definition
| 14,282 | 14,740 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,189 |
class TFResNetMainLayer(keras.layers.Layer):
config_class = ResNetConfig
def __init__(self, config: ResNetConfig, **kwargs) -> None:
super().__init__(**kwargs)
self.config = config
self.embedder = TFResNetEmbeddings(config, name="embedder")
self.encoder = TFResNetEncoder(config, name="encoder")
self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True)
@unpack_inputs
def call(
self,
pixel_values: tf.Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# TF 2.0 image layers can't use NCHW format when running on CPU.
# We transpose to NHWC format and then transpose back after the full forward pass.
# (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1])
embedding_output = self.embedder(pixel_values, training=training)
encoder_outputs = self.encoder(
embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
)
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
# Transpose all the outputs to the NCHW format
# (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
last_hidden_state = tf.transpose(last_hidden_state, (0, 3, 1, 2))
pooled_output = tf.transpose(pooled_output, (0, 3, 1, 2))
hidden_states = ()
for hidden_state in encoder_outputs[1:]:
hidden_states = hidden_states + tuple(tf.transpose(h, (0, 3, 1, 2)) for h in hidden_state)
if not return_dict:
return (last_hidden_state, pooled_output) + hidden_states
hidden_states = hidden_states if output_hidden_states else None
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=hidden_states,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "embedder", None) is not None:
with tf.name_scope(self.embedder.name):
self.embedder.build(None)
if getattr(self, "encoder", None) is not None:
with tf.name_scope(self.encoder.name):
self.encoder.build(None)
|
class_definition
| 16,020 | 18,887 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,190 |
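A shape-only sketch of the layout round-trip in `call` above: inputs arrive in NCHW to match the PyTorch API, are transposed to NHWC for the Keras layers, and the outputs are transposed back.

```python
import tensorflow as tf

pixel_values = tf.random.normal((2, 3, 224, 224))      # NCHW, as the API expects
nhwc = tf.transpose(pixel_values, perm=[0, 2, 3, 1])   # -> (2, 224, 224, 3)
nchw = tf.transpose(nhwc, perm=[0, 3, 1, 2])           # -> (2, 3, 224, 224)
print(nhwc.shape, nchw.shape)
```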
class TFResNetModel(TFResNetPreTrainedModel):
def __init__(self, config: ResNetConfig, **kwargs) -> None:
super().__init__(config, **kwargs)
self.resnet = TFResNetMainLayer(config=config, name="resnet")
@add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutputWithPoolingAndNoAttention,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
@unpack_inputs
def call(
self,
pixel_values: tf.Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
resnet_outputs = self.resnet(
pixel_values=pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
return resnet_outputs
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "resnet", None) is not None:
with tf.name_scope(self.resnet.name):
self.resnet.build(None)
|
class_definition
| 19,030 | 20,601 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,191 |
class TFResNetForImageClassification(TFResNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config: ResNetConfig, **kwargs) -> None:
super().__init__(config, **kwargs)
self.num_labels = config.num_labels
self.resnet = TFResNetMainLayer(config, name="resnet")
# classification head
self.classifier_layer = (
keras.layers.Dense(config.num_labels, name="classifier.1")
if config.num_labels > 0
else keras.layers.Activation("linear", name="classifier.1")
)
self.config = config
def classifier(self, x: tf.Tensor) -> tf.Tensor:
x = keras.layers.Flatten()(x)
logits = self.classifier_layer(x)
return logits
@add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT,
output_type=TFImageClassifierOutputWithNoAttention,
config_class=_CONFIG_FOR_DOC,
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
@unpack_inputs
def call(
self,
pixel_values: tf.Tensor = None,
labels: tf.Tensor = None,
output_hidden_states: bool = None,
return_dict: bool = None,
training: bool = False,
) -> Union[Tuple[tf.Tensor], TFImageClassifierOutputWithNoAttention]:
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.resnet(
pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None if labels is None else self.hf_compute_loss(labels, logits)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "resnet", None) is not None:
with tf.name_scope(self.resnet.name):
self.resnet.build(None)
if getattr(self, "classifier_layer", None) is not None:
with tf.name_scope(self.classifier_layer.name):
self.classifier_layer.build([None, None, self.config.hidden_sizes[-1]])
|
class_definition
| 20,803 | 23,649 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_tf_resnet.py
| null | 5,192 |
class Tracker:
module: nn.Module
traced: List[nn.Module] = field(default_factory=list)
handles: list = field(default_factory=list)
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
if has_not_submodules:
self.traced.append(m)
def __call__(self, x: Tensor):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(x)
[x.remove() for x in self.handles]
return self
@property
def parametrized(self):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
|
class_definition
| 1,117 | 1,961 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/convert_resnet_to_pytorch.py
| null | 5,193 |
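Example use of `Tracker` (a sketch, assuming the `@dataclass` decorator and imports from the full conversion script): trace the leaf modules a forward pass touches and keep only the parametrized ones.

```python
import torch
from torch import nn

model = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8), nn.ReLU())
traced = Tracker(model)(torch.randn(1, 3, 32, 32)).parametrized
print([type(m).__name__ for m in traced])  # ['Conv2d', 'BatchNorm2d'] (ReLU has no params)
```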
class ModuleTransfer:
src: nn.Module
dest: nn.Module
verbose: int = 0
src_skip: List = field(default_factory=list)
dest_skip: List = field(default_factory=list)
def __call__(self, x: Tensor):
"""
Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
"""
dest_traced = Tracker(self.dest)(x).parametrized
src_traced = Tracker(self.src)(x).parametrized
src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
if len(dest_traced) != len(src_traced):
raise Exception(
f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
f" destination module has {len(dest_traced)}."
)
for dest_m, src_m in zip(dest_traced, src_traced):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}")
|
class_definition
| 1,975 | 3,155 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/convert_resnet_to_pytorch.py
| null | 5,194 |
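A sketch of ModuleTransfer on two architecturally identical modules (again assuming the @dataclass decorator from the source file). Because the traced parametrized leaves line up one-to-one, each destination leaf can load the matching source state dict, which is how the conversion script ports pretrained weights.

import torch
from torch import nn

def make_net():
    return nn.Sequential(
        nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8, 4, 1)
    )

src, dest = make_net().eval(), make_net().eval()
x = torch.randn(1, 3, 32, 32)  # dummy input that drives both tracers

ModuleTransfer(src=src, dest=dest, verbose=1)(x)  # copies weights leaf by leaf

# After the transfer the two modules agree on the same input.
assert torch.allclose(src(x), dest(x))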
class Identity(nn.Module):
"""Identity function."""
@nn.compact
def __call__(self, x, **kwargs):
return x
|
class_definition
| 3,996 | 4,122 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_flax_resnet.py
| null | 5,195 |
class FlaxResNetConvLayer(nn.Module):
out_channels: int
kernel_size: int = 3
stride: int = 1
activation: Optional[str] = "relu"
dtype: jnp.dtype = jnp.float32
def setup(self):
self.convolution = nn.Conv(
self.out_channels,
kernel_size=(self.kernel_size, self.kernel_size),
strides=self.stride,
padding=self.kernel_size // 2,
dtype=self.dtype,
use_bias=False,
kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="normal", dtype=self.dtype),
)
self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype)
self.activation_func = ACT2FN[self.activation] if self.activation is not None else Identity()
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
hidden_state = self.convolution(x)
hidden_state = self.normalization(hidden_state, use_running_average=deterministic)
hidden_state = self.activation_func(hidden_state)
return hidden_state
|
class_definition
| 4,125 | 5,213 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_flax_resnet.py
| null | 5,196 |
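A shape-level sketch of the conv layer above; the sizes are illustrative, not taken from the row. Flax convolutions expect NHWC input, and deterministic=True makes the batch norm read its running statistics, so no mutable collections are needed at apply time.

import jax
import jax.numpy as jnp

layer = FlaxResNetConvLayer(out_channels=64, kernel_size=3, stride=2)
pixels = jnp.ones((1, 224, 224, 3))  # NHWC

variables = layer.init(jax.random.PRNGKey(0), pixels, deterministic=True)
out = layer.apply(variables, pixels, deterministic=True)
print(out.shape)  # (1, 112, 112, 64)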
class FlaxResNetEmbeddings(nn.Module):
"""
ResNet Embeddings (stem) composed of a single aggressive convolution.
"""
config: ResNetConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.embedder = FlaxResNetConvLayer(
self.config.embedding_size,
kernel_size=7,
stride=2,
activation=self.config.hidden_act,
dtype=self.dtype,
)
self.max_pool = partial(nn.max_pool, window_shape=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)))
def __call__(self, pixel_values: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
num_channels = pixel_values.shape[-1]
if num_channels != self.config.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embedding = self.embedder(pixel_values, deterministic=deterministic)
embedding = self.max_pool(embedding)
return embedding
|
class_definition
| 5,216 | 6,262 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_flax_resnet.py
| null | 5,197 |
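An illustrative sketch of the stem above: a stride-2 7x7 convolution followed by a stride-2 3x3 max pool, so a 224x224 input comes out at 56x56. The config values are library defaults, not values stored in the dataset row.

import jax
import jax.numpy as jnp
from transformers import ResNetConfig

config = ResNetConfig(embedding_size=64, num_channels=3)
stem = FlaxResNetEmbeddings(config)

pixels = jnp.ones((1, 224, 224, 3))  # NHWC
variables = stem.init(jax.random.PRNGKey(0), pixels, deterministic=True)
embedding = stem.apply(variables, pixels, deterministic=True)
print(embedding.shape)  # (1, 56, 56, 64)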
class FlaxResNetShortCut(nn.Module):
"""
ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
downsample the input using `stride=2`.
"""
out_channels: int
stride: int = 2
dtype: jnp.dtype = jnp.float32
def setup(self):
self.convolution = nn.Conv(
self.out_channels,
kernel_size=(1, 1),
strides=self.stride,
use_bias=False,
kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"),
dtype=self.dtype,
)
self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype)
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
hidden_state = self.convolution(x)
hidden_state = self.normalization(hidden_state, use_running_average=deterministic)
return hidden_state
|
class_definition
| 6,265 | 7,217 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_flax_resnet.py
| null | 5,198 |
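An illustrative sketch of the shortcut above: a 1x1 convolution that changes the channel count and, with the default stride of 2, halves the spatial resolution. The shapes are hypothetical.

import jax
import jax.numpy as jnp

shortcut = FlaxResNetShortCut(out_channels=256, stride=2)
features = jnp.ones((1, 56, 56, 64))  # NHWC residual input

variables = shortcut.init(jax.random.PRNGKey(0), features, deterministic=True)
projected = shortcut.apply(variables, features, deterministic=True)
print(projected.shape)  # (1, 28, 28, 256)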
class FlaxResNetBasicLayerCollection(nn.Module):
out_channels: int
stride: int = 1
dtype: jnp.dtype = jnp.float32
def setup(self):
self.layer = [
FlaxResNetConvLayer(self.out_channels, stride=self.stride, dtype=self.dtype),
FlaxResNetConvLayer(self.out_channels, activation=None, dtype=self.dtype),
]
def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
for layer in self.layer:
hidden_state = layer(hidden_state, deterministic=deterministic)
return hidden_state
|
class_definition
| 7,220 | 7,809 | 0 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/resnet/modeling_flax_resnet.py
| null | 5,199 |
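An illustrative sketch of the basic-layer collection above: two 3x3 conv layers, the second without an activation because the surrounding residual layer applies it after the skip-connection addition. The shapes are hypothetical.

import jax
import jax.numpy as jnp

block = FlaxResNetBasicLayerCollection(out_channels=64, stride=1)
hidden = jnp.ones((1, 56, 56, 64))  # NHWC

variables = block.init(jax.random.PRNGKey(0), hidden, deterministic=True)
out = block.apply(variables, hidden, deterministic=True)
print(out.shape)  # (1, 56, 56, 64)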