| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase__ = 100 ):
UpperCAmelCase_ = sum(i * i for i in range(1 , n + 1 ) )
UpperCAmelCase_ = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
from __future__ import annotations
import math
def a__ ( lowerCAmelCase__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCamelCase = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def a__ ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
UpperCAmelCase_ = []
for num in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase_ = 0
while 2 * i * i <= odd_composites[num]:
UpperCAmelCase_ = odd_composites[num] - 2 * i * i
if is_prime(lowerCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowerCAmelCase__ ) == n:
return list_nums
return []
def a__ ( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class lowercase__ :
'''simple docstring'''
def __init__( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = psutil.Process()
UpperCAmelCase_ = False
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = -1
while True:
UpperCAmelCase_ = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = threading.Thread(target=self.peak_monitor )
UpperCAmelCase_ = True
self.thread.start()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = False
self.thread.join()
return self.cpu_memory_peak
lowerCamelCase = PeakCPUMemory()
def a__ ( ):
# Time
UpperCAmelCase_ = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase_ = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase_ = torch.cuda.memory_allocated(__SCREAMING_SNAKE_CASE )
torch.cuda.reset_peak_memory_stats()
return measures
def a__ ( lowerCAmelCase__ ):
# Time
UpperCAmelCase_ = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase_ = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
UpperCAmelCase_ = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase_ = (torch.cuda.memory_allocated(__SCREAMING_SNAKE_CASE ) - start_measures[str(__SCREAMING_SNAKE_CASE )]) / 2**20
UpperCAmelCase_ = (torch.cuda.max_memory_allocated(__SCREAMING_SNAKE_CASE ) - start_measures[str(__SCREAMING_SNAKE_CASE )]) / 2**20
return measures
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
print(f"""{description}:""" )
print(f"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(f"""- GPU {i} allocated: {measures[str(__SCREAMING_SNAKE_CASE )]:.2f}MiB""" )
UpperCAmelCase_ = measures[f"""{i}-peak"""]
print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
print(f"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(f"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''convbert'''
def __init__( self : Any , _UpperCAmelCase : Optional[int]=30522 , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Any=3072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1e-12 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : str=9 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = head_ratio
UpperCAmelCase_ = conv_kernel_size
UpperCAmelCase_ = num_groups
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
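
# Editor's note: illustrative usage of the configuration above through the
# public transformers API (standard pattern, not part of the original file).
if __name__ == "__main__":
    from transformers import ConvBertConfig, ConvBertModel

    config = ConvBertConfig(num_hidden_layers=6)  # any default can be overridden
    model = ConvBertModel(config)
    print(config.hidden_size)  # 768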
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
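
# Editor's note: with the lazy structure above, submodules are only imported on
# first attribute access. The snippet below is an illustrative sketch of
# standard downstream usage, not part of the original file.
if __name__ == "__main__":
    from transformers import TrOCRProcessor

    processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
    print(type(processor).__name__)  # TrOCRProcessor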
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''mobilenet_v1'''
def __init__( self : Tuple , _UpperCAmelCase : int=3 , _UpperCAmelCase : Union[str, Any]=224 , _UpperCAmelCase : Any=1.0 , _UpperCAmelCase : Any=8 , _UpperCAmelCase : List[Any]="relu6" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Dict=0.999 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[Any]=0.001 , **_UpperCAmelCase : str , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = depth_multiplier
UpperCAmelCase_ = min_depth
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = tf_padding
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def lowercase__ ( self : Tuple ) -> float:
'''simple docstring'''
return 1e-4
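
# Editor's note: an illustrative check, added by the editor, that the
# constructor above validates its input (not part of the original file).
if __name__ == "__main__":
    config = MobileNetV1Config(depth_multiplier=0.75)
    print(config.image_size)  # 224 (the default)
    try:
        MobileNetV1Config(depth_multiplier=0)
    except ValueError as err:
        print(err)  # depth_multiplier must be greater than zero.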
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
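
# Editor's note: a quick, illustrative sanity check of normalize_text above,
# added by the editor (not part of the original script).
if __name__ == "__main__":
    assert normalize_text("Hello, World") == "hello world"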
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) == 0:
return []
UpperCAmelCase_ , UpperCAmelCase_ = min(lowerCAmelCase__ ), max(lowerCAmelCase__ )
UpperCAmelCase_ = int(max_value - min_value ) + 1
UpperCAmelCase_ = [[] for _ in range(lowerCAmelCase__ )]
for i in my_list:
buckets[int(i - min_value )].append(lowerCAmelCase__ )
return [v for bucket in buckets for v in sorted(lowerCAmelCase__ )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
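    # Editor's note: an illustrative extra check added by the editor. The sort
    # also handles floats, since the bucket index is computed as int(value - min_value).
    assert bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2]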
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowerCamelCase = """\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"""
lowerCamelCase = """\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"""
lowerCamelCase = """\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
UpperCAmelCase_ = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
UpperCAmelCase_ = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
UpperCAmelCase_ = evaluate(dataset=_a , predictions=_a )
return score | 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""PerceiverFeatureExtractor"""]
lowerCamelCase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowerCamelCase = False
try:
lowerCamelCase = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class lowercase__ :
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : str = None , _UpperCAmelCase : list = [] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = choices
UpperCAmelCase_ = prompt
if sys.platform == "win32":
UpperCAmelCase_ = """*"""
else:
UpperCAmelCase_ = """➔ """
def lowercase__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str = "" ) -> List[str]:
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , a_ )
else:
forceWrite(self.choices[index] , a_ )
def lowercase__ ( self : List[str] , _UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
if index == self.position:
forceWrite(F""" {self.arrow_char} """ )
self.write_choice(a_ )
else:
forceWrite(F""" {self.choices[index]}""" )
reset_cursor()
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Direction , _UpperCAmelCase : int = 1 ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(a_ )
move_cursor(a_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(a_ )] for number in range(10 )] )
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = int(chr(self.current_selection ) )
UpperCAmelCase_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , a_ )
else:
return
else:
return
def lowercase__ ( self : Tuple , _UpperCAmelCase : int = 0 ) -> List[Any]:
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
UpperCAmelCase_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(a_ )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase_ = int(builtins.input() )
except ValueError:
UpperCAmelCase_ = default_choice
else:
UpperCAmelCase_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(a_ , "\n" )
return choice
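
# Editor's note: a minimal, illustrative driver for the menu above; the prompt
# and choices are placeholders invented for this sketch, not part of the
# original file.
if __name__ == "__main__":
    menu = BulletMenu("Pick a backend:", ["cpu", "cuda", "mps"])
    selected = menu.run(default_choice=0)
    print(f"You picked: {menu.choices[selected]}")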
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
if not head:
return True
# split the list to two parts
UpperCAmelCase_ , UpperCAmelCase_ = head.next, head
while fast and fast.next:
UpperCAmelCase_ = fast.next.next
UpperCAmelCase_ = slow.next
UpperCAmelCase_ = slow.next
UpperCAmelCase_ = None # Don't forget here! But forget still works!
# reverse the second part
UpperCAmelCase_ = None
while second:
UpperCAmelCase_ = second.next
UpperCAmelCase_ = node
UpperCAmelCase_ = second
UpperCAmelCase_ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase_ = node.next
UpperCAmelCase_ = head.next
return True
def a__ ( lowerCAmelCase__ ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ = [slow.val]
while slow.next:
UpperCAmelCase_ = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ = cur.next
return True
def a__ ( lowerCAmelCase__ ):
if not head or not head.next:
return True
UpperCAmelCase_ = {}
UpperCAmelCase_ = 0
while head:
if head.val in d:
d[head.val].append(lowerCAmelCase__ )
else:
UpperCAmelCase_ = [pos]
UpperCAmelCase_ = head.next
pos += 1
UpperCAmelCase_ = pos - 1
UpperCAmelCase_ = 0
for v in d.values():
if len(lowerCAmelCase__ ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ = 0
for i in range(0 , len(lowerCAmelCase__ ) ):
if v[i] + v[len(lowerCAmelCase__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
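
# Editor's note: the functions above assume a singly linked list node exposing
# `val` and `next`. The minimal node class and driver below are illustrative
# additions by the editor, not part of the original file.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


if __name__ == "__main__":
    print(is_palindrome(ListNode(1, ListNode(2, ListNode(1)))))  # True
    print(is_palindrome_stack(ListNode(1, ListNode(2))))  # False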
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCAmelCase_ = MaskFormerConfig(backbone_config=lowerCAmelCase__ )
UpperCAmelCase_ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase_ = 847
UpperCAmelCase_ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCAmelCase_ = 150
UpperCAmelCase_ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase_ = 171
UpperCAmelCase_ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCAmelCase_ = 133
UpperCAmelCase_ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase_ = 19
UpperCAmelCase_ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase_ = 65
UpperCAmelCase_ = "mapillary-vistas-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase_ = {int(k ): v for k, v in idalabel.items()}
return config
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dct.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
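# Illustration of the rename helper above with hypothetical keys: the old entry
# is popped and its value re-inserted under the new name, mutating the dict in
# place, e.g. {"old.weight": w} becomes {"new.weight": w}.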
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:dim, :]
UpperCAmelCase_ = in_proj_bias[: dim]
UpperCAmelCase_ = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ = in_proj_weight[
-dim :, :
]
UpperCAmelCase_ = in_proj_bias[-dim :]
# fmt: on
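    # Shape bookkeeping for the split above: the fused qkv projection has weight
    # shape (3 * dim, dim) and bias shape (3 * dim,), so the slices [:dim],
    # [dim : dim * 2] and [-dim:] recover the query, key and value parameters in order.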
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
# fmt: off
UpperCAmelCase_ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# fmt: on
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
UpperCAmelCase_ = get_maskformer_config(lowerCAmelCase__ )
# load original state_dict
with open(lowerCAmelCase__ , "rb" ) as f:
        UpperCAmelCase_ = pickle.load(f )
UpperCAmelCase_ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCAmelCase_ = create_rename_keys(lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_swin_q_k_v(lowerCAmelCase__ , config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# update to torch tensors
for key, value in state_dict.items():
UpperCAmelCase_ = torch.from_numpy(lowerCAmelCase__ )
# load 🤗 model
UpperCAmelCase_ = MaskFormerForInstanceSegmentation(lowerCAmelCase__ )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase__ , param.shape )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys ) == 0, f"""Unexpected keys: {unexpected_keys}"""
# verify results
UpperCAmelCase_ = prepare_img()
if "vistas" in model_name:
UpperCAmelCase_ = 65
elif "cityscapes" in model_name:
UpperCAmelCase_ = 65535
else:
UpperCAmelCase_ = 255
UpperCAmelCase_ = True if "ade" in model_name else False
UpperCAmelCase_ = MaskFormerImageProcessor(ignore_index=lowerCAmelCase__ , reduce_labels=lowerCAmelCase__ )
UpperCAmelCase_ = image_processor(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase_ = model(**lowerCAmelCase__ )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCAmelCase_ = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(f"""nielsr/{model_name}""" )
image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
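    # Hypothetical invocation (script name and checkpoint path are placeholders):
    #   python convert_maskformer_checkpoint.py --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path ./model.pkl --pytorch_dump_folder_path ./maskformer-hf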
| 14 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowercase__ ( BaseTokenizer ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : Optional[int] = "▁" , _UpperCAmelCase : Any = True , _UpperCAmelCase : Tuple = "<unk>" , _UpperCAmelCase : Optional[int] = "</s>" , _UpperCAmelCase : List[str] = "<pad>" , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
UpperCAmelCase_ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
UpperCAmelCase_ = token_dict["token"]
UpperCAmelCase_ = Tokenizer(Unigram() )
UpperCAmelCase_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
UpperCAmelCase_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ),
pre_tokenizers.Digits(individual_digits=lowerCAmelCase__ ),
pre_tokenizers.Punctuation(),
] )
UpperCAmelCase_ = decoders.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
UpperCAmelCase_ = TemplateProcessing(
single=F"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
UpperCAmelCase_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : str = 8000 , _UpperCAmelCase : Any = True , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = trainers.UnigramTrainer(
vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = [files]
self._tokenizer.train(lowerCAmelCase__ , trainer=lowerCAmelCase__ )
self.add_unk_id()
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] = 8000 , _UpperCAmelCase : Tuple = True , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = trainers.UnigramTrainer(
vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , )
self._tokenizer.train_from_iterator(lowerCAmelCase__ , trainer=lowerCAmelCase__ )
self.add_unk_id()
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = json.loads(self._tokenizer.to_str() )
UpperCAmelCase_ = self.special_tokens["unk"]["id"]
UpperCAmelCase_ = Tokenizer.from_str(json.dumps(lowerCAmelCase__ ) )
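# Usage sketch for the class above, assuming the de-obfuscated method names of
# the original SentencePieceUnigramTokenizer (train_from_iterator, encode):
#   tok = SentencePieceUnigramTokenizer()
#   tok.train_from_iterator(["foo bar", "bar baz"], vocab_size=100)
#   ids = tok._tokenizer.encode("foo baz").ids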
| 708 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCamelCase = 50_003
lowerCamelCase = 50_002
@require_sentencepiece
@require_tokenizers
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PLBartTokenizer
UpperCamelCase = None
UpperCamelCase = False
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
        UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
self.assertListEqual(_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="multi" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
        UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
self.assertListEqual(
_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = '''uclanlp/plbart-python-en_XX'''
UpperCamelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCamelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCamelCase = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
UpperCAmelCase_ = 1
return cls
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , _UpperCAmelCase )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
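        # shift_tokens_right (mBART-style) rotates the trailing language code to
        # position 0, so decoder inputs look like [lang_code, tokens ..., eos];
        # the assertions below check exactly that layout.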
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
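        # i.e. PLBart appends the language code after EOS (suffix convention),
        # unlike mBART, which prefixes it; hence the empty prefix_tokens above.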
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
| 14 | 0 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowercase__ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=__lowerCAmelCase , )
def lowercase__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowerCAmelCase )
class lowercase__ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=__lowerCAmelCase , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowerCAmelCase )
def a__ ( ):
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def a__ ( ):
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
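# For reference, the generators above yield (key, example) pairs, e.g.
# get_test_dummy_examples()[0] == (0, {"content": "foo"}) and
# get_test_nested_examples()[0] == (0, {"a": {"b": ["foo"]}}).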
class lowercase__ ( TestCase ):
'''simple docstring'''
@require_beam
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=__lowerCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , __lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , __lowerCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase_ = beam.io.parquetio.WriteToParquet
UpperCAmelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=__lowerCAmelCase , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCAmelCase_ = partial(__lowerCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__lowerCAmelCase , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        __lowerCAmelCase , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , __lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , __lowerCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=__lowerCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = NestedBeamDataset(cache_dir=__lowerCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , __lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , __lowerCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 709 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''owlvit_text_model'''
def __init__( self : List[Any] , _UpperCAmelCase : str=49408 , _UpperCAmelCase : str=512 , _UpperCAmelCase : Optional[Any]=2048 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=8 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : List[str]="quick_gelu" , _UpperCAmelCase : Dict=1e-5 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[int]=1.0 , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : Dict=49406 , _UpperCAmelCase : Union[str, Any]=49407 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''owlvit_vision_model'''
def __init__( self : str , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : Optional[Any]=3072 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Dict="quick_gelu" , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=1.0 , **_UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : Any , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''owlvit'''
UpperCamelCase = True
def __init__( self : Tuple , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Any=2.6592 , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if text_config is None:
UpperCAmelCase_ = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
UpperCAmelCase_ = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
UpperCAmelCase_ = OwlViTTextConfig(**_UpperCAmelCase )
UpperCAmelCase_ = OwlViTVisionConfig(**_UpperCAmelCase )
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = logit_scale_init_value
UpperCAmelCase_ = return_dict
UpperCAmelCase_ = 1.0
@classmethod
def lowercase__ ( cls : Dict , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Tuple ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowercase__ ( cls : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = text_config
UpperCAmelCase_ = vision_config
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
class lowercase__ ( OnnxConfig ):
'''simple docstring'''
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-4
def lowercase__ ( self : List[str] , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , framework=_UpperCAmelCase )
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.image_processor , batch_size=_UpperCAmelCase , framework=_UpperCAmelCase )
return {**text_input_dict, **image_input_dict}
@property
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
return 14
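# Usage sketch, assuming the classes above correspond to the Hugging Face
# OwlViTTextConfig / OwlViTVisionConfig / OwlViTConfig:
#   text_cfg = OwlViTTextConfig(vocab_size=49408, hidden_size=512)
#   vision_cfg = OwlViTVisionConfig(patch_size=32)
#   cfg = OwlViTConfig(text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict())
#   assert cfg.projection_dim == 512  # the default set in __init__ above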
| 14 | 0 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCamelCase = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
lowerCamelCase = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
lowerCamelCase = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def lowercase__ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=False ) -> List[Any]:
'''simple docstring'''
if rouge_types is None:
UpperCAmelCase_ = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
UpperCAmelCase_ = rouge_scorer.RougeScorer(rouge_types=UpperCAmelCase_ , use_stemmer=UpperCAmelCase_ )
if use_aggregator:
UpperCAmelCase_ = scoring.BootstrapAggregator()
else:
UpperCAmelCase_ = []
for ref, pred in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase_ = scorer.score(UpperCAmelCase_ , UpperCAmelCase_ )
if use_aggregator:
aggregator.add_scores(UpperCAmelCase_ )
else:
scores.append(UpperCAmelCase_ )
if use_aggregator:
UpperCAmelCase_ = aggregator.aggregate()
else:
UpperCAmelCase_ = {}
for key in scores[0]:
UpperCAmelCase_ = [score[key] for score in scores]
return result
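# When use_aggregator is False, the branch above returns one list of per-pair
# Score tuples per rouge type, e.g. {"rouge1": [Score(...), Score(...)], ...},
# instead of the bootstrap AggregateScore shown in the docstring example.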
| 710 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = XLMProphetNetTokenizer
UpperCamelCase = False
UpperCamelCase = True
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "[PAD]"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCAmelCase ) , 1012 )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [35389, 6672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 14 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
    UpperCAmelCase_ = int(lowerCAmelCase__ )
    if n_element < 1:
        UpperCAmelCase_ = ValueError("n_element should be a positive number" )
        raise my_error
    UpperCAmelCase_ = [1]
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (0, 0, 0)
UpperCAmelCase_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
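# Example, using the de-obfuscated name `hamming` from the __main__ block below:
# the first ten Hamming (5-smooth) numbers.
#   hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]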
if __name__ == "__main__":
lowerCamelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCamelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"The list with nth numbers is: {hamming_numbers}")
print("""-----------------------------------------------------""")
| 711 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
class lowercase__ ( BaseImageProcessor ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : str , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : float , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
UpperCAmelCase_ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ = int(shortest_edge / crop_pct )
UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ) -> Any:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , crop_pct=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
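# Illustrative round trip through the processor above (hypothetical variable
# names; `pil_image` is any RGB PIL.Image; `lowercase__` is the mangled class
# name used in this snippet):
#     processor = lowercase__(size={"shortest_edge": 384})
#     batch = processor(images=pil_image, return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 3, 384, 384) via the warp branch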
| 14 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[Any]=30 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Optional[Any]=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Optional[Any]=37 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Any=2 , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 2
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = TFDeiTModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ = TFDeiTForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFDeiTForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = TFDeiTForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFDeiTForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = TFDeiTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str]=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = TFDeiTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 712 |
"""simple docstring"""
import string
def a__ ( lowerCAmelCase__ ):
for key in range(len(string.ascii_uppercase ) ):
UpperCAmelCase_ = ""
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCAmelCase_ = string.ascii_uppercase.find(lowerCAmelCase__ )
UpperCAmelCase_ = num - key
if num < 0:
UpperCAmelCase_ = num + len(string.ascii_uppercase )
UpperCAmelCase_ = translated + string.ascii_uppercase[num]
else:
UpperCAmelCase_ = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def a__ ( ):
UpperCAmelCase_ = input("Encrypted message: " )
UpperCAmelCase_ = message.upper()
decrypt(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 14 | 0 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : NestedDataStructureLike[PathLike] , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Optional[Features] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Dict , ) -> str:
'''simple docstring'''
super().__init__(
_UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase_ = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else {self.split: path_or_paths}
UpperCAmelCase_ = Text(
cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , **_UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
if self.streaming:
UpperCAmelCase_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
UpperCAmelCase_ = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
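# Illustrative usage (hypothetical names; `TextDatasetReader` stands in for
# the mangled class above, and the method above acts as its `read()` entry point):
#     reader = TextDatasetReader("corpus.txt", split=NamedSplit("train"))
#     ds = reader.read()   # one example per line of corpus.txt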
| 713 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "width_multiplier" ) )
class lowercase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Any=64 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Dict="swish" , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : int=32 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=10 , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=0.25 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Optional[int]=0.0 , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = conv_kernel_size
UpperCAmelCase_ = output_stride
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = width_multiplier
UpperCAmelCase_ = ffn_dropout
UpperCAmelCase_ = attn_dropout
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowercase__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaModelTester(self )
UpperCAmelCase_ = MobileViTVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ):
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_ = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MobileViTVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits.detach().cpu()
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
UpperCAmelCase_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
UpperCAmelCase_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 14 | 0 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
UpperCAmelCase_ = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = model.state_dict()
def to_tf_var_name(lowerCAmelCase__ ):
for patt, repl in iter(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_ = name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return f"""bert/{name}"""
def create_tf_var(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase_ = tf.get_variable(dtype=SCREAMING_SNAKE_CASE_ , shape=tensor.shape , name=SCREAMING_SNAKE_CASE_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(SCREAMING_SNAKE_CASE_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase_ = to_tf_var_name(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase_ = torch_tensor.T
UpperCAmelCase_ = create_tf_var(tensor=SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ , session=SCREAMING_SNAKE_CASE_ )
tf.keras.backend.set_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = session.run(SCREAMING_SNAKE_CASE_ )
print(f"""Successfully created {tf_name}: {np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() )
saver.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , model_name.replace("-" , "_" ) + ".ckpt" ) )
def a__ ( lowerCAmelCase__=None ):
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Directory in which to save tensorflow model" )
UpperCAmelCase_ = parser.parse_args(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=SCREAMING_SNAKE_CASE_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
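# Illustrative CLI invocation (script name and paths are placeholders; the
# flags match the argparse definitions above):
#     python convert_bert_to_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./pytorch_model.bin \
#         --tf_cache_dir ./tf_ckpt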
if __name__ == "__main__":
main()
| 714 |
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ , UpperCAmelCase_ = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
input_list[low : high + 1] = result + left + right
return input_list
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) <= 1:
return input_list
UpperCAmelCase_ = list(lowerCAmelCase__ )
# iteration for two-way merging
UpperCAmelCase_ = 2
while p <= len(lowerCAmelCase__ ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = i + p - 1
UpperCAmelCase_ = (low + high + 1) // 2
UpperCAmelCase_ = merge(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# final merge of last two parts
if p * 2 >= len(lowerCAmelCase__ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = merge(lowerCAmelCase__ , 0 , lowerCAmelCase__ , len(lowerCAmelCase__ ) - 1 )
break
p *= 2
return input_list
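# Quick illustrative check of the bottom-up merge sort above:
#     iter_merge_sort([9, 4, 1, 7, 7, 0])  # -> [0, 1, 4, 7, 7, 9]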
if __name__ == "__main__":
lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
lowerCamelCase = []
else:
lowerCamelCase = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 14 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowercase__ ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any]=1024 , _UpperCAmelCase : str=1024 , _UpperCAmelCase : Dict=3.6 ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = tokenizer
UpperCAmelCase_ = tokenizer.bos_token_id
UpperCAmelCase_ = dataset
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = iter(self.dataset )
UpperCAmelCase_ = True
while more_examples:
UpperCAmelCase_ , UpperCAmelCase_ = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(A_ )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
UpperCAmelCase_ = False
break
UpperCAmelCase_ = self.tokenizer(A_ , truncation=A_ )["input_ids"]
UpperCAmelCase_ = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(A_ ) , self.seq_length ):
UpperCAmelCase_ = all_token_ids[i : i + self.seq_length]
if len(A_ ) == self.seq_length:
yield torch.tensor(A_ )
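# Illustrative consumption (hypothetical tokenizer/stream): each yielded item
# is a 1-D tensor of exactly `seq_length` token ids, with examples packed
# back-to-back and separated by the BOS token:
#     ds = ConstantLengthDataset(tokenizer, streaming_dataset, seq_length=1024)
#     first_chunk = next(iter(ds))   # torch.Size([1024])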
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {"streaming": True}
UpperCAmelCase_ = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
UpperCAmelCase_ = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
UpperCAmelCase_ = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def a__ ( lowerCAmelCase__ ):
model.eval()
UpperCAmelCase_ = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
UpperCAmelCase_ = model(_lowerCAmelCase , labels=_lowerCAmelCase )
UpperCAmelCase_ = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
UpperCAmelCase_ = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
UpperCAmelCase_ = torch.exp(_lowerCAmelCase )
except OverflowError:
UpperCAmelCase_ = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
lowerCamelCase = Accelerator()
# Parse configuration
lowerCamelCase = HfArgumentParser(EvaluationArguments)
lowerCamelCase = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCamelCase = logging.getLogger(__name__)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
lowerCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCamelCase = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCamelCase = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
lowerCamelCase = evaluate(args)
logger.info(F"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 715 |
"""simple docstring"""
lowerCamelCase = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCAmelCase_ = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {', '.join(lowerCAmelCase__ )}"""
)
raise ValueError(lowerCAmelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
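# Worked example (illustrative; the converter above is mangled to `a__`):
# converting 1 kilowatthour to joule follows the return expression directly:
#     1 * ENERGY_CONVERSION["kilowatthour"] / ENERGY_CONVERSION["joule"]
#     # -> 3_600_000.0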
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowercase__ ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def a__ ( ):
if os.name == "nt":
UpperCAmelCase_ = CursorInfo()
UpperCAmelCase_ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(lowerCAmelCase__ , ctypes.byref(lowerCAmelCase__ ) )
UpperCAmelCase_ = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(lowerCAmelCase__ , ctypes.byref(lowerCAmelCase__ ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def a__ ( ):
if os.name == "nt":
UpperCAmelCase_ = CursorInfo()
UpperCAmelCase_ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(lowerCAmelCase__ , ctypes.byref(lowerCAmelCase__ ) )
UpperCAmelCase_ = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(lowerCAmelCase__ , ctypes.byref(lowerCAmelCase__ ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def a__ ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
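# Illustrative use of the helpers above around long-running console output:
#     hide_cursor()
#     try:
#         ...  # e.g. drive a progress bar without a blinking cursor
#     finally:
#         show_cursor()
# The @contextmanager above packages exactly this try/finally pattern.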
| 716 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase__ ):
return [[videos]]
raise ValueError(f"""Could not make batched video from {videos}""" )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Any , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Tuple , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size["shortest_edge"] , default_to_square=_UpperCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size["height"], size["width"]) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : str , ) -> List[str]:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Tuple , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Any , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(_UpperCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(_UpperCAmelCase , size=_UpperCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase )
return image
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , param_name="crop_size" )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(_UpperCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=_UpperCAmelCase , do_resize=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , do_center_crop=_UpperCAmelCase , crop_size=_UpperCAmelCase , do_rescale=_UpperCAmelCase , rescale_factor=_UpperCAmelCase , do_normalize=_UpperCAmelCase , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase , data_format=_UpperCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 14 | 0 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowercase__ ( UpperCAmelCase__ ):
'''simple docstring'''
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , UpperCAmelCase__ , )
| 717 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowerCAmelCase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
UpperCAmelCase_ = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
UpperCAmelCase_ = [[0.0, 0.0], [0.0, 0.0]]
UpperCAmelCase_ , UpperCAmelCase_ = matrix[1][1], matrix[0][0]
UpperCAmelCase_ , UpperCAmelCase_ = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowerCAmelCase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowerCAmelCase__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
UpperCAmelCase_ = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
UpperCAmelCase_ = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
UpperCAmelCase_ = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
UpperCAmelCase_ = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
UpperCAmelCase_ = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
UpperCAmelCase_ = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
UpperCAmelCase_ = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
UpperCAmelCase_ = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
UpperCAmelCase_ = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
UpperCAmelCase_ = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
UpperCAmelCase_ = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
UpperCAmelCase_ = array(lowerCAmelCase__ )
for i in range(3 ):
for j in range(3 ):
adjoint_matrix[i][j] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
UpperCAmelCase_ = array(lowerCAmelCase__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowerCAmelCase__ )
# Calculate the inverse of the matrix
return [[float(d(lowerCAmelCase__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 14 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase__ ( __a , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MobileBertTokenizer
UpperCamelCase = MobileBertTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = filter_non_english
UpperCamelCase = "google/mobilebert-uncased"
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def lowercase__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = "UNwant\u00E9d,running"
UpperCAmelCase_ = "unwanted, running"
return input_text, output_text
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "UNwant\u00E9d,running"
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
UpperCAmelCase_ = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ = "UNwant\u00E9d,running"
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
UpperCAmelCase_ = {}
for i, token in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase_ = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
UpperCAmelCase_ = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False
UpperCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ["的", "人", "有"]
UpperCAmelCase_ = "".join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = True
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = False
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
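# Hedged standalone sketch (not the library implementation): the greedy
# longest-match-first WordPiece behaviour the tests above exercise.
def _wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            sub = word[start:end]
            if start > 0:
                sub = "##" + sub
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]  # no matching piece: the whole word is unknown
        pieces.append(cur)
        start = end
    return pieces


assert _wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]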
| 718 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # cells equal to 1 are traversable
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
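# Hedged usage check: on a fully open 2x2 grid of 1s, the shortest
# 4-connected path from (0, 0) to (1, 1) has length 2.
_demo_grid = np.array([[1, 1], [1, 1]])
_dist, _path = dijkstra(_demo_grid, (0, 0), (1, 1), allow_diagonal=False)
assert _dist == 2 and _path[0] == (0, 0) and _path[-1] == (1, 1)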
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : int = 32 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , _UpperCAmelCase : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=7 , _UpperCAmelCase : Any=30 , _UpperCAmelCase : Optional[Any]=400 , _UpperCAmelCase : int=3 , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {"""shortest_edge""": 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowercase__ ( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str]=False ) -> Optional[int]:
'''simple docstring'''
if not batched:
UpperCAmelCase_ = self.size["""shortest_edge"""]
UpperCAmelCase_ = image_inputs[0]
if isinstance(__a , Image.Image ):
UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(__a , __a )
if h < w:
UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size )
if max(__a , __a ) > max_size:
UpperCAmelCase_ = max_size / max(__a , __a )
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ = max(__a , key=lambda _UpperCAmelCase : item[0] )[0]
UpperCAmelCase_ = max(__a , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
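# Hedged numeric example of the shortest-edge rule implemented above:
# resizing a 480x640 image so the shorter side becomes 288 keeps the
# aspect ratio (the longer side scales to 384).
_h, _w, _short = 480, 640, 288
_scale = _short / min(_h, _w)
assert (round(_h * _scale), round(_w * _scale)) == (288, 384)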
@require_torch
@require_vision
class lowercase__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self )
@property
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "image_mean" ) )
self.assertTrue(hasattr(__a , "image_std" ) )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "size" ) )
self.assertTrue(hasattr(__a , "size_divisor" ) )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(__a , return_tensors="pt" ).pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(__a , return_tensors="pt" ).pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(__a , return_tensors="pt" ).pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 719 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x, y, max_step):
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence is guaranteed once |z|^2 = a*a + b*b exceeds 4,
        # i.e. the absolute value of the complex number exceeds 2
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
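# Hedged sanity check: the origin never escapes, so its normalized step
# count is 1, while (1, 1) escapes on the very first iteration.
assert get_distance(0, 0, 50) == 1
assert get_distance(1, 1, 50) == 0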
def get_black_and_white_rgb(distance):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width=800,
    image_height=600,
    figure_center_x=-0.6,
    figure_center_y=0,
    figure_width=3.2,
    max_step=50,
    use_distance_color_coding=True,
):
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 14 | 0 |
"""simple docstring"""
def solution(power=1000):
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
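# Hedged sanity check (the classic Project Euler 16 example):
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26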
if __name__ == "__main__":
lowerCamelCase = int(input("""Enter the power of 2: """).strip())
print("""2 ^ """, power, """ = """, 2**power)
    result = solution(power)
print("""Sum of the digits is: """, result)
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swinv2"""] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
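# Hedged standalone sketch (not the transformers implementation): the lazy
# attribute resolution a structure like _import_structure enables, via a
# module-level __getattr__ (PEP 562).
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(name)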
| 14 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
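# Hedged standalone miniature (not the script's API) of the key-copy pattern
# above: flat downstream keys such as "projector.weight" are written onto the
# matching submodule's parameters.
import torch as _torch

def _copy_linear(downstream, prefix, linear):
    # pulls "<prefix>.weight" / "<prefix>.bias" from a flat state dict
    linear.weight.data = downstream[f"{prefix}.weight"]
    linear.bias.data = downstream[f"{prefix}.bias"]

_layer = _torch.nn.Linear(4, 2)
_state = {"projector.weight": _torch.zeros(2, 4), "projector.bias": _torch.zeros(2)}
_copy_linear(_state, "projector", _layer)
assert _torch.equal(_layer.weight, _torch.zeros(2, 4))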
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
lowerCamelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 721 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
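# Hedged sanity checks for the 6k +/- 1 trial division above.
assert is_prime(2) and is_prime(3) and is_prime(97)
assert not is_prime(1) and not is_prime(49)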
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n):
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be > 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # no prime + twice-a-square decomposition exists: a counterexample
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution():
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 14 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe.dual_guided(
prompt="first prompt" , image=_UpperCAmelCase , text_to_image_strength=0.75 , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained(_UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = generator.manual_seed(0 )
UpperCAmelCase_ = pipe.dual_guided(
prompt="first prompt" , image=_UpperCAmelCase , text_to_image_strength=0.75 , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "cyberpunk 2077"
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe.dual_guided(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , text_to_image_strength=0.75 , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe.text_to_image(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCAmelCase_ = pipe.image_variation(_UpperCAmelCase , generator=_UpperCAmelCase , output_type="numpy" ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
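# Hedged standalone illustration of the reference-slice pattern these tests
# use: compare a 3x3 corner of a generated image against stored values.
import numpy as _np

_image = _np.zeros((1, 512, 512, 3))
_slice = _image[0, 253:256, 253:256, -1]
assert _slice.shape == (3, 3)
assert _np.abs(_slice.flatten() - _np.zeros(9)).max() < 1e-1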
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''convbert'''
def __init__( self : Any , _UpperCAmelCase : Optional[int]=30522 , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Any=3072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1e-12 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : str=9 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = head_ratio
UpperCAmelCase_ = conv_kernel_size
UpperCAmelCase_ = num_groups
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( OnnxConfig ):
'''simple docstring'''
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
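# Hedged standalone illustration (not the transformers API): the dynamic-axes
# mapping built by the property above marks which input dimensions may vary
# at ONNX export time.
from collections import OrderedDict as _OrderedDict

_dynamic_axis = {0: "batch", 1: "sequence"}
_onnx_inputs = _OrderedDict([("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)])
assert list(_onnx_inputs) == ["input_ids", "attention_mask"]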
| 14 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowercase__ ( unittest.TestCase ):
def __init__( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=7 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Tuple=30 , _UpperCAmelCase : List[str]=400 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[Any]=0.9 , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=[0.5, 0.5, 0.5] , _UpperCAmelCase : Dict=[0.5, 0.5, 0.5] , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 30}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 30, "width": 30}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize_and_center_crop
UpperCAmelCase_ = size
UpperCAmelCase_ = crop_pct
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
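# Hedged sketch of the center-crop geometry such processors use: crop a
# (ch, cw) window from the middle of an (h, w) image.
def _center_crop_box(h, w, ch, cw):
    top, left = (h - ch) // 2, (w - cw) // 2
    return top, left, top + ch, left + cw


assert _center_crop_box(30, 40, 30, 30) == (0, 5, 30, 35)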
@require_torch
@require_vision
class lowercase__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = PoolFormerImageProcessingTester(self )
@property
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
self.assertTrue(hasattr(__UpperCamelCase , "crop_pct" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCamelCase , "image_std" ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
pass
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 701 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''mobilenet_v1'''
def __init__( self : Tuple , _UpperCAmelCase : int=3 , _UpperCAmelCase : Union[str, Any]=224 , _UpperCAmelCase : Any=1.0 , _UpperCAmelCase : Any=8 , _UpperCAmelCase : List[Any]="relu6" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Dict=0.999 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[Any]=0.001 , **_UpperCAmelCase : str , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = depth_multiplier
UpperCAmelCase_ = min_depth
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = tf_padding
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
class lowercase__ ( OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def lowercase__ ( self : Tuple ) -> float:
'''simple docstring'''
return 1e-4
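# Hedged numeric sketch (assumption: the standard MobileNet channel rule,
# not necessarily this config's exact formula): channel counts are scaled by
# depth_multiplier, rounded to a multiple of 8, and clamped by min_depth.
def _apply_depth_multiplier(channels, multiplier, min_depth=8, divisor=8):
    return max(min_depth, int(channels * multiplier + divisor / 2) // divisor * divisor)


assert _apply_depth_multiplier(32, 0.75) == 24
assert _apply_depth_multiplier(32, 0.1) == 8  # clamped by min_depth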
| 14 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    # Compute the entropy of a probability distribution along the last axis
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
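# Hedged numeric check of the helper above, H(p) = -sum(p * log p):
# a fair coin has entropy ln(2).
import math as _math

assert abs(float(entropy(torch.tensor([0.5, 0.5]))) - _math.log(2)) < 1e-6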
def print_2d_tensor(tensor):
    # Print a 2D tensor, one logged line per layer
    logger.info("lv, h >\t" + "\t".join(f"""{x + 1}""" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"""layer {row + 1}:\t""" + "\t".join(f"""{x:.5f}""" for x in tensor[row].cpu().data))
        else:
            logger.info(f"""layer {row + 1}:\t""" + "\t".join(f"""{x:d}""" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    # This method shows how to compute head attention entropy and head importance scores
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If the heads were actually pruned, set head_mask to None to avoid a shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    # Mask heads (set some heads to zero) until the score drops below a
    # threshold of the original score, following Michel et al. (2019)
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )
    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
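# Hedged toy version of the selection step above: zero the num_to_mask
# least important entries of a flattened importance tensor.
import torch as _torch

_importance = _torch.tensor([0.9, 0.1, 0.5, 0.2])
_new_mask = _torch.ones_like(_importance)
_new_mask[_importance.sort().indices[:2]] = 0.0  # mask the two weakest heads
assert _new_mask.tolist() == [1.0, 0.0, 1.0, 0.0]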
def prune_heads(args, model, eval_dataloader, head_mask):
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 702 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
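# a "*" in a target key is a placeholder that gets replaced with the encoder layer index during conversion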
lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
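# weights under these keys are attached at the top level of the HF model rather than under the wav2vec2_conformer prefix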
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for attribute in key.split("." ):
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
elif weight_type == "running_mean":
UpperCAmelCase_ = value
elif weight_type == "running_var":
UpperCAmelCase_ = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase_ = value
elif weight_type == "inv_freq":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ = fairseq_model.state_dict()
UpperCAmelCase_ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase_ = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase_ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase_ = True
if "*" in mapped_key:
UpperCAmelCase_ = name.split(lowerCAmelCase__ )[0].split("." )[-2]
UpperCAmelCase_ = mapped_key.replace("*" , lowerCAmelCase__ )
if "pos_bias_u" in name:
UpperCAmelCase_ = None
elif "pos_bias_v" in name:
UpperCAmelCase_ = None
elif "weight_g" in name:
UpperCAmelCase_ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ = "weight_v"
elif "bias" in name:
UpperCAmelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase_ = "weight"
elif "running_mean" in name:
UpperCAmelCase_ = "running_mean"
elif "inv_freq" in name:
UpperCAmelCase_ = "inv_freq"
elif "running_var" in name:
UpperCAmelCase_ = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase_ = "num_batches_tracked"
else:
UpperCAmelCase_ = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = full_name.split("conv_layers." )[-1]
UpperCAmelCase_ = name.split("." )
UpperCAmelCase_ = int(items[0] )
UpperCAmelCase_ = int(items[1] )
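    # fairseq convention: type_id 0 marks conv weights/biases, type_id 2 marks the (layer or group) norm parameters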
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ):
if config_path is not None:
UpperCAmelCase_ = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase__ , hidden_act="swish" )
else:
UpperCAmelCase_ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCAmelCase_ = "rotary"
if is_finetuned:
if dict_path:
UpperCAmelCase_ = Dictionary.load(lowerCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase_ = target_dict.pad_index
UpperCAmelCase_ = target_dict.bos_index
UpperCAmelCase_ = target_dict.eos_index
UpperCAmelCase_ = len(target_dict.symbols )
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , "vocab.json" )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase__ ) )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
UpperCAmelCase_ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = WavaVecaCTCTokenizer(
lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCAmelCase__ , )
UpperCAmelCase_ = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
UpperCAmelCase_ = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ = WavaVecaConformerForCTC(lowerCAmelCase__ )
else:
UpperCAmelCase_ = WavaVecaConformerForPreTraining(lowerCAmelCase__ )
if is_finetuned:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase_ = argparse.Namespace(task="audio_pretraining" )
UpperCAmelCase_ = fairseq.tasks.setup_task(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase__ )
UpperCAmelCase_ = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
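    # Example invocation (script name and paths are illustrative placeholders):
    # python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
    #     --checkpoint_path /path/to/fairseq_checkpoint.pt --pytorch_dump_folder_path ./hf_model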
| 14 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
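# dummy stand-ins registered when the onnx backend is unavailable; any use raises an informative error telling the user to install it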
class lowercase__ ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase = ["""onnx"""]
def __init__( self : Union[str, Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : int ) -> int:
'''simple docstring'''
requires_backends(self , ["onnx"] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["onnx"] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["onnx"] )
| 703 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort( lowerCAmelCase__ ):
    if len(lowerCAmelCase__ ) == 0:
        return []
    min_value , max_value = min(lowerCAmelCase__ ), max(lowerCAmelCase__ )
    # one bucket per integer offset in the closed range [min_value, max_value]
    bucket_count = int(max_value - min_value ) + 1
    buckets = [[] for _ in range(bucket_count )]
    for i in lowerCAmelCase__:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 14 | 0 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowerCamelCase = logging.getLogger(__name__)
class lowercase__ :
'''simple docstring'''
def __init__( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
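        # the retriever is constructed lazily, on the first create_rag_retriever call inside each Ray actor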
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
if not self.initialized:
UpperCAmelCase_ = RagRetriever(
__snake_case , question_encoder_tokenizer=__snake_case , generator_tokenizer=__snake_case , index=__snake_case , init_retrieval=__snake_case , )
UpperCAmelCase_ = True
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
self.retriever.index.init_index()
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.retriever._main_retrieve(__snake_case , __snake_case )
return doc_ids, retrieved_doc_embeds
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
if index is not None and index.is_initialized() and len(__snake_case ) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you\'ll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py " )
super().__init__(
__snake_case , question_encoder_tokenizer=__snake_case , generator_tokenizer=__snake_case , index=__snake_case , init_retrieval=__snake_case , )
UpperCAmelCase_ = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__snake_case , __snake_case , __snake_case , __snake_case )
for worker in self.retrieval_workers
] )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowercase__ ( self : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
UpperCAmelCase_ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
UpperCAmelCase_ = ray.get(random_worker.retrieve.remote(__snake_case , __snake_case ) )
else:
UpperCAmelCase_ = self._main_retrieve(__snake_case , __snake_case )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__snake_case )
@classmethod
def lowercase__ ( cls : int , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
return super(__snake_case , cls ).get_tokenizers(__snake_case , __snake_case , **__snake_case )
@classmethod
def lowercase__ ( cls : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = kwargs.pop("config" , __snake_case ) or RagConfig.from_pretrained(__snake_case , **__snake_case )
UpperCAmelCase_ = RagTokenizer.from_pretrained(__snake_case , config=__snake_case )
UpperCAmelCase_ = rag_tokenizer.question_encoder
UpperCAmelCase_ = rag_tokenizer.generator
if indexed_dataset is not None:
            UpperCAmelCase_ = "custom"
UpperCAmelCase_ = CustomHFIndex(config.retrieval_vector_size , __snake_case )
else:
UpperCAmelCase_ = cls._build_index(__snake_case )
return cls(
            __snake_case , question_encoder_tokenizer=__snake_case , generator_tokenizer=__snake_case , retrieval_workers=__snake_case , index=__snake_case , )
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
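# optional components are appended to the import structure below only when their backend (vision, torch) is importable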
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""PerceiverFeatureExtractor"""]
lowerCamelCase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
"""simple docstring"""
from functools import reduce
lowerCamelCase = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
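# the 1000-digit number from Project Euler problem 8, split across implicitly concatenated string literals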
def solution( lowerCAmelCase__ = lowerCamelCase ):
    # largest product of 13 adjacent digits in the 1000-digit number
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , lowerCAmelCase__[i : i + 13] ) )
        for i in range(len(lowerCAmelCase__ ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 705 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
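# plain substring renames applied to every checkpoint key before the regex-based rewrites below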
lowerCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ , UpperCAmelCase_ = create_model(
"HTSAT-tiny" , "roberta" , lowerCAmelCase__ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=lowerCAmelCase__ , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = r".*sequential.(\d+).*"
UpperCAmelCase_ = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase_ = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
# replace sequential layers with list
UpperCAmelCase_ = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 )
UpperCAmelCase_ = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(lowerCAmelCase__ )//3}.linear.""" )
elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase_ = 1 if projecton_layer == 0 else 2
UpperCAmelCase_ = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase_ = value
UpperCAmelCase_ = mixed_qkv.size(0 ) // 3
UpperCAmelCase_ = mixed_qkv[:qkv_dim]
UpperCAmelCase_ = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase_ = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase_ = query_layer
UpperCAmelCase_ = key_layer
UpperCAmelCase_ = value_layer
else:
UpperCAmelCase_ = value
return model_state_dict
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ , UpperCAmelCase_ = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ )
clap_model.eval()
UpperCAmelCase_ = clap_model.state_dict()
UpperCAmelCase_ = rename_state_dict(lowerCAmelCase__ )
UpperCAmelCase_ = ClapConfig()
UpperCAmelCase_ = enable_fusion
UpperCAmelCase_ = ClapModel(lowerCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
transformers_config.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 14 | 0 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
print("Loading config file..." )
def flatten_yaml_as_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ):
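        # recursively flatten a nested YAML mapping into single-level dotted keys such as "model.classification.name"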
UpperCAmelCase_ = []
for k, v in d.items():
UpperCAmelCase_ = parent_key + sep + k if parent_key else k
if isinstance(lowercase_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(lowercase_ , lowercase_ , sep=lowercase_ ).items() )
else:
items.append((new_key, v) )
return dict(lowercase_ )
UpperCAmelCase_ = argparse.Namespace()
with open(lowercase_ , "r" ) as yaml_file:
try:
UpperCAmelCase_ = yaml.load(lowercase_ , Loader=yaml.FullLoader )
UpperCAmelCase_ = flatten_yaml_as_dict(lowercase_ )
for k, v in flat_cfg.items():
setattr(lowercase_ , lowercase_ , lowercase_ )
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(lowercase_ , str(lowercase_ ) ) )
return config
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = MobileViTVaConfig()
UpperCAmelCase_ = False
# dataset
if task_name.startswith("imagenet1k_" ):
UpperCAmelCase_ = 1000
if int(task_name.strip().split("_" )[-1] ) == 384:
UpperCAmelCase_ = 384
else:
UpperCAmelCase_ = 256
UpperCAmelCase_ = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
UpperCAmelCase_ = 21000
if int(task_name.strip().split("_" )[-1] ) == 384:
UpperCAmelCase_ = 384
else:
UpperCAmelCase_ = 256
UpperCAmelCase_ = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
UpperCAmelCase_ = 151
UpperCAmelCase_ = 512
UpperCAmelCase_ = "ade20k-id2label.json"
UpperCAmelCase_ = True
elif task_name.startswith("voc_" ):
UpperCAmelCase_ = 21
UpperCAmelCase_ = 512
UpperCAmelCase_ = "pascal-voc-id2label.json"
UpperCAmelCase_ = True
# orig_config
UpperCAmelCase_ = load_orig_config_file(lowercase_ )
assert getattr(lowercase_ , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase_ = getattr(lowercase_ , "model.classification.mitv2.width_multiplier" , 1.0 )
assert (
getattr(lowercase_ , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase_ = getattr(lowercase_ , "model.classification.activation.name" , "swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase_ = getattr(lowercase_ , "model.segmentation.output_stride" , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase_ = getattr(lowercase_ , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
UpperCAmelCase_ = getattr(lowercase_ , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
UpperCAmelCase_ = getattr(lowercase_ , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
# id2label
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowercase_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
return config
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dct.pop(lowercase_ )
UpperCAmelCase_ = val
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ):
if base_model:
UpperCAmelCase_ = ""
else:
UpperCAmelCase_ = "mobilevitv2."
UpperCAmelCase_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase_ = k[8:]
else:
UpperCAmelCase_ = k
if ".block." in k:
UpperCAmelCase_ = k_new.replace(".block." , "." )
if ".conv." in k:
UpperCAmelCase_ = k_new.replace(".conv." , ".convolution." )
if ".norm." in k:
UpperCAmelCase_ = k_new.replace(".norm." , ".normalization." )
if "conv_1." in k:
UpperCAmelCase_ = k_new.replace("conv_1." , f"""{model_prefix}conv_stem.""" )
for i in [1, 2]:
if f"""layer_{i}.""" in k:
UpperCAmelCase_ = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" )
if ".exp_1x1." in k:
UpperCAmelCase_ = k_new.replace(".exp_1x1." , ".expand_1x1." )
if ".red_1x1." in k:
UpperCAmelCase_ = k_new.replace(".red_1x1." , ".reduce_1x1." )
for i in [3, 4, 5]:
if f"""layer_{i}.0.""" in k:
UpperCAmelCase_ = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
if f"""layer_{i}.1.local_rep.0.""" in k:
UpperCAmelCase_ = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
if f"""layer_{i}.1.local_rep.1.""" in k:
UpperCAmelCase_ = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase_ = [0, 1]
elif i == 4:
UpperCAmelCase_ = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase_ = [0, 1, 2]
for j in j_in:
if f"""layer_{i}.1.global_rep.{j}.""" in k:
UpperCAmelCase_ = k_new.replace(
f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
UpperCAmelCase_ = k_new.replace(
f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
if f"""layer_{i}.1.conv_proj.""" in k:
UpperCAmelCase_ = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
if "pre_norm_attn.0." in k:
UpperCAmelCase_ = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
if "pre_norm_attn.1." in k:
UpperCAmelCase_ = k_new.replace("pre_norm_attn.1." , "attention." )
if "pre_norm_ffn.0." in k:
UpperCAmelCase_ = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
if "pre_norm_ffn.1." in k:
UpperCAmelCase_ = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
if "pre_norm_ffn.3." in k:
UpperCAmelCase_ = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
if "classifier.1." in k:
UpperCAmelCase_ = k_new.replace("classifier.1." , "classifier." )
if "seg_head." in k:
UpperCAmelCase_ = k_new.replace("seg_head." , "segmentation_head." )
if ".aspp_layer." in k:
UpperCAmelCase_ = k_new.replace(".aspp_layer." , "." )
if ".aspp_pool." in k:
UpperCAmelCase_ = k_new.replace(".aspp_pool." , "." )
rename_keys.append((k, k_new) )
return rename_keys
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(lowercase_ )
for k in keys_to_ignore:
state_dict.pop(lowercase_ , lowercase_ )
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = get_mobilevitva_config(lowercase_ , lowercase_ )
# load original state_dict
UpperCAmelCase_ = torch.load(lowercase_ , map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation(lowercase_ ).eval()
UpperCAmelCase_ = False
else:
UpperCAmelCase_ = MobileViTVaForImageClassification(lowercase_ ).eval()
UpperCAmelCase_ = False
# remove and rename some keys of load the original model
UpperCAmelCase_ = checkpoint
remove_unused_keys(lowercase_ )
UpperCAmelCase_ = create_rename_keys(lowercase_ , base_model=lowercase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
# load modified state_dict
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase_ = model(**lowercase_ )
# verify classification model
if task_name.startswith("imagenet" ):
UpperCAmelCase_ = outputs.logits
UpperCAmelCase_ = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n """
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 706 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
if not head:
return True
    # split the list into two parts
UpperCAmelCase_ , UpperCAmelCase_ = head.next, head
while fast and fast.next:
UpperCAmelCase_ = fast.next.next
UpperCAmelCase_ = slow.next
UpperCAmelCase_ = slow.next
    UpperCAmelCase_ = None # terminate the first half; the check still passes without this, but it keeps the two halves cleanly separated
# reverse the second part
UpperCAmelCase_ = None
while second:
UpperCAmelCase_ = second.next
UpperCAmelCase_ = node
UpperCAmelCase_ = second
UpperCAmelCase_ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase_ = node.next
UpperCAmelCase_ = head.next
return True
def a__ ( lowerCAmelCase__ ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ = [slow.val]
while slow.next:
UpperCAmelCase_ = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ = cur.next
return True
def a__ ( lowerCAmelCase__ ):
if not head or not head.next:
return True
UpperCAmelCase_ = {}
UpperCAmelCase_ = 0
while head:
if head.val in d:
d[head.val].append(lowerCAmelCase__ )
else:
UpperCAmelCase_ = [pos]
UpperCAmelCase_ = head.next
pos += 1
UpperCAmelCase_ = pos - 1
UpperCAmelCase_ = 0
for v in d.values():
if len(lowerCAmelCase__ ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ = 0
for i in range(0 , len(lowerCAmelCase__ ) ):
if v[i] + v[len(lowerCAmelCase__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 14 | 0 |
"""simple docstring"""
import requests
lowerCamelCase = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def fetch_bbc_news( bbc_news_api_key ):
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"] , 1 ):
        print(f"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 707 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCAmelCase_ = MaskFormerConfig(backbone_config=lowerCAmelCase__ )
UpperCAmelCase_ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase_ = 847
UpperCAmelCase_ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCAmelCase_ = 150
UpperCAmelCase_ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase_ = 171
UpperCAmelCase_ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCAmelCase_ = 133
UpperCAmelCase_ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase_ = 19
UpperCAmelCase_ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase_ = 65
UpperCAmelCase_ = "mapillary-vistas-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
return config
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dct.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
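    # Swin doubles the channel dimension at each stage, hence embed_dim * 2**i features per stage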
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:dim, :]
UpperCAmelCase_ = in_proj_bias[: dim]
UpperCAmelCase_ = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ = in_proj_weight[
-dim :, :
]
UpperCAmelCase_ = in_proj_bias[-dim :]
# fmt: on
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
# fmt: off
UpperCAmelCase_ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# fmt: on
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
UpperCAmelCase_ = get_maskformer_config(lowerCAmelCase__ )
# load original state_dict
with open(lowerCAmelCase__ , "rb" ) as f:
UpperCAmelCase_ = pickle.load(lowerCAmelCase__ )
UpperCAmelCase_ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCAmelCase_ = create_rename_keys(lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_swin_q_k_v(lowerCAmelCase__ , config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# update to torch tensors
for key, value in state_dict.items():
UpperCAmelCase_ = torch.from_numpy(lowerCAmelCase__ )
# load 🤗 model
UpperCAmelCase_ = MaskFormerForInstanceSegmentation(lowerCAmelCase__ )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase__ , param.shape )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCAmelCase__ ) == 0, f"""Unexpected keys: {unexpected_keys}"""
# verify results
UpperCAmelCase_ = prepare_img()
if "vistas" in model_name:
UpperCAmelCase_ = 65
elif "cityscapes" in model_name:
UpperCAmelCase_ = 65535
else:
UpperCAmelCase_ = 255
UpperCAmelCase_ = True if "ade" in model_name else False
UpperCAmelCase_ = MaskFormerImageProcessor(ignore_index=lowerCAmelCase__ , reduce_labels=lowerCAmelCase__ )
UpperCAmelCase_ = image_processor(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase_ = model(**lowerCAmelCase__ )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCAmelCase_ = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(f"""nielsr/{model_name}""" )
image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowercase__ ( _lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase = 'decision_transformer'
UpperCamelCase = ['past_key_values']
UpperCamelCase = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
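    # maps the generic config attribute names onto the GPT-2 style names this architecture uses internally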
def __init__( self : Union[str, Any] , _UpperCAmelCase : List[str]=17 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Any=128 , _UpperCAmelCase : List[str]=4096 , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : List[str]=1024 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Any="relu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[str]=1e-5 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : List[str]=50256 , _UpperCAmelCase : Tuple=50256 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : List[Any]=False , **_UpperCAmelCase : str , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = state_dim
UpperCAmelCase_ = act_dim
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = max_ep_len
UpperCAmelCase_ = action_tanh
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = n_positions
UpperCAmelCase_ = n_layer
UpperCAmelCase_ = n_head
UpperCAmelCase_ = n_inner
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = resid_pdrop
UpperCAmelCase_ = embd_pdrop
UpperCAmelCase_ = attn_pdrop
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scale_attn_weights
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ = reorder_and_upcast_attn
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
| 708 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCamelCase = 50_003
lowerCamelCase = 50_002
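# ids of the __en_XX__ (50_003) and __python__ (50_002) language-code tokens for the base-vocab test fixture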
@require_sentencepiece
@require_tokenizers
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PLBartTokenizer
UpperCamelCase = None
UpperCamelCase = False
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 4 , _UpperCAmelCase )]
self.assertListEqual(_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="multi" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 7 , _UpperCAmelCase )]
self.assertListEqual(
_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = '''uclanlp/plbart-python-en_XX'''
UpperCamelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCamelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCamelCase = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
UpperCAmelCase_ = 1
return cls
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , _UpperCAmelCase )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
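# --- Added illustrative sketch (not part of the original file) ---
# End-to-end use of the API exercised above; the checkpoint and language codes are
# taken from the test constants, and running this needs network access to fetch them.
if __name__ == "__main__":
    tok = PLBartTokenizer.from_pretrained(
        "uclanlp/plbart-python-en_XX", language_codes="base", src_lang="python", tgt_lang="en_XX"
    )
    ids = tok("def add(a,b):NEW_LINE_INDENTreturn a+b").input_ids
    # Source sequences end with </s> (id 2) followed by the source language code.
    print(ids[-2:])  # e.g. [2, 50002], where 50002 is __python__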
| 14 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
        UpperCAmelCase_ = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
UpperCAmelCase_ = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
UpperCAmelCase_ = InstructBlipProcessor(_a , _a , _a )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self : str , **_UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def lowercase__ ( self : Dict , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def lowercase__ ( self : Dict , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).qformer_tokenizer
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
        UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
self.assertIsInstance(processor.qformer_tokenizer , _a )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_a , return_tensors="np" )
UpperCAmelCase_ = processor(images=_a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
UpperCAmelCase_ = """lower newer"""
UpperCAmelCase_ = processor(text=_a )
UpperCAmelCase_ = tokenizer(_a , return_token_type_ids=_a )
UpperCAmelCase_ = qformer_tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
UpperCAmelCase_ = """lower newer"""
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_a )
UpperCAmelCase_ = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
UpperCAmelCase_ = """lower newer"""
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 709 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCamelCase = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''owlvit_text_model'''
def __init__( self : List[Any] , _UpperCAmelCase : str=49408 , _UpperCAmelCase : str=512 , _UpperCAmelCase : Optional[Any]=2048 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=8 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : List[str]="quick_gelu" , _UpperCAmelCase : Dict=1e-5 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[int]=1.0 , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : Dict=49406 , _UpperCAmelCase : Union[str, Any]=49407 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''owlvit_vision_model'''
def __init__( self : str , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : Optional[Any]=3072 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Dict="quick_gelu" , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=1.0 , **_UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : Any , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''owlvit'''
UpperCamelCase = True
def __init__( self : Tuple , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Any=2.6592 , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if text_config is None:
UpperCAmelCase_ = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
UpperCAmelCase_ = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
UpperCAmelCase_ = OwlViTTextConfig(**_UpperCAmelCase )
UpperCAmelCase_ = OwlViTVisionConfig(**_UpperCAmelCase )
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = logit_scale_init_value
UpperCAmelCase_ = return_dict
UpperCAmelCase_ = 1.0
@classmethod
def lowercase__ ( cls : Dict , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Tuple ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowercase__ ( cls : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = text_config
UpperCAmelCase_ = vision_config
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
class lowercase__ ( OnnxConfig ):
'''simple docstring'''
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-4
def lowercase__ ( self : List[str] , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , framework=_UpperCAmelCase )
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.image_processor , batch_size=_UpperCAmelCase , framework=_UpperCAmelCase )
return {**text_input_dict, **image_input_dict}
@property
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
return 14
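# --- Added illustrative sketch (not part of the original file) ---
# Composing the joint config from its two halves via the classmethod above. Per the
# code path shown (`from_dict` -> `__init__`), the halves are passed as plain dicts.
# The class names follow the real transformers API this file mirrors; treat the exact
# call signature as an assumption:
if __name__ == "__main__":
    from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

    text_config = OwlViTTextConfig(hidden_size=512)
    vision_config = OwlViTVisionConfig(patch_size=32)
    config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
    print(config.text_config.hidden_size, config.vision_config.patch_size)  # 512 32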
| 14 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PegasusTokenizer
UpperCamelCase = PegasusTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = PegasusTokenizer(UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowercase__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowercase__ ( self : str , _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return ("This is a test", "This is a test")
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(UpperCamelCase__ ) , 1103 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase_ = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
UpperCAmelCase_ = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
UpperCAmelCase_ = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase_ = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
UpperCAmelCase_ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase_ = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase_ = '''To ensure a smooth flow of bank resolutions.'''
UpperCAmelCase_ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase_ = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ['''This is going to be way too long.''' * 150, '''short example''']
UpperCAmelCase_ = ['''not super long but more than 5 tokens''', '''tiny''']
UpperCAmelCase_ = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="pt" )
UpperCAmelCase_ = self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
@slow
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
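        # fmt: off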
UpperCAmelCase_ = {'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PegasusTokenizer
UpperCamelCase = PegasusTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = PegasusTokenizer(UpperCamelCase__ , offset=0 , mask_token_sent=UpperCamelCase__ , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowercase__ ( self : Any , **_UpperCAmelCase : int ) -> Any:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
return ("This is a test", "This is a test")
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase_ = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
UpperCAmelCase_ = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
UpperCAmelCase_ = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_torch
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ['''This is going to be way too long.''' * 1000, '''short example''']
UpperCAmelCase_ = ['''not super long but more than 5 tokens''', '''tiny''']
UpperCAmelCase_ = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="pt" )
UpperCAmelCase_ = self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
UpperCAmelCase_ = self._large_tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(
UpperCamelCase__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
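# --- Added illustrative sketch (not part of the original file) ---
# The seq2seq preparation pattern the batch tests above rely on: sources and targets
# are tokenized in one call via `text_target`, which yields a `labels` field. Assumes
# torch is installed and needs network access for the pretrained checkpoint:
if __name__ == "__main__":
    tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
    batch = tok(
        ["A very long source document."],
        text_target=["A short summary."],
        padding="longest",
        return_tensors="pt",
    )
    print(batch.input_ids.shape, batch.labels.shape)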
| 710 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = XLMProphetNetTokenizer
UpperCamelCase = False
UpperCamelCase = True
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "[PAD]"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCAmelCase ) , 1012 )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [35389, 6672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
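        # fmt: off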
UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 14 | 0 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
lowerCamelCase = """scheduler_config.json"""
class lowercase__ ( Enum ):
'''simple docstring'''
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
UpperCamelCase = 4
UpperCamelCase = 5
@dataclass
class lowercase__ ( BaseOutput ):
'''simple docstring'''
UpperCamelCase = 42
class lowercase__ :
'''simple docstring'''
UpperCamelCase = SCHEDULER_CONFIG_NAME
UpperCamelCase = ["dtype"]
UpperCamelCase = []
UpperCamelCase = True
@classmethod
def lowercase__ ( cls : Any , _UpperCAmelCase : int = None , _UpperCAmelCase : int = None , _UpperCAmelCase : Dict=False , **_UpperCAmelCase : Dict , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = cls.load_config(
pretrained_model_name_or_path=_lowerCamelCase , subfolder=_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase , )
UpperCAmelCase_ , UpperCAmelCase_ = cls.from_config(_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase )
if hasattr(_lowerCamelCase , "create_state" ) and getattr(_lowerCamelCase , "has_state" , _lowerCamelCase ):
UpperCAmelCase_ = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowercase__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : str = False , **_UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
self.save_config(save_directory=_lowerCamelCase , push_to_hub=_lowerCamelCase , **_lowerCamelCase )
@property
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowercase__ ( cls : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) )
UpperCAmelCase_ = importlib.import_module(__name__.split("." )[0] )
UpperCAmelCase_ = [
getattr(_lowerCamelCase , _lowerCamelCase ) for c in compatible_classes_str if hasattr(_lowerCamelCase , _lowerCamelCase )
]
return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    # Right-pad x with singleton dims so it broadcasts against `shape` from the left.
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    # Glide cosine ("squaredcos_cap_v2") schedule: derive betas from cumulative alpha_bar.
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class lowercase__ :
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
@classmethod
def lowercase__ ( cls : List[Any] , _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = scheduler.config
if config.trained_betas is not None:
UpperCAmelCase_ = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCAmelCase_ = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
UpperCAmelCase_ = 1.0 - betas
UpperCAmelCase_ = jnp.cumprod(_lowerCamelCase , axis=0 )
return cls(
alphas=_lowerCamelCase , betas=_lowerCamelCase , alphas_cumprod=_lowerCamelCase , )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
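# --- Added illustrative sketch (not part of the original file) ---
# The forward-diffusion identity the helpers above implement:
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# demonstrated on a toy linear beta schedule (all names are local to this sketch):
if __name__ == "__main__":
    import jax

    betas = jnp.linspace(1e-4, 2e-2, 1000)
    alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)
    x0 = jnp.ones((2, 3))
    eps = jax.random.normal(jax.random.PRNGKey(0), x0.shape)
    t = jnp.array([10, 500])
    sqrt_ab = broadcast_to_shape_from_left(alphas_cumprod[t] ** 0.5, x0.shape)
    sqrt_1m_ab = broadcast_to_shape_from_left((1.0 - alphas_cumprod[t]) ** 0.5, x0.shape)
    xt = sqrt_ab * x0 + sqrt_1m_ab * eps
    print(xt.shape)  # (2, 3)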
| 711 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
class lowercase__ ( BaseImageProcessor ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : str , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : float , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
UpperCAmelCase_ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ = int(shortest_edge / crop_pct )
UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ) -> Any:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , crop_pct=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
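# --- Added illustrative sketch (not part of the original file) ---
# The resize rule in `resize` above, worked through numerically: below 384 px the image
# is first resized so its short side equals shortest_edge / crop_pct and then
# center-cropped back to shortest_edge; at >= 384 px it is warped directly.
if __name__ == "__main__":
    shortest_edge, crop_pct = 224, 224 / 256
    resize_target = int(shortest_edge / crop_pct)
    print(resize_target)  # 256 -> resize short side to 256, then center-crop 224x224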
| 14 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
class lowercase__ ( BaseImageProcessor ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : Tuple , _UpperCAmelCase : Union[str, Any] = True , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : List[Any] = PILImageResampling.BICUBIC , _UpperCAmelCase : str = True , _UpperCAmelCase : Tuple = 1 / 255 , _UpperCAmelCase : List[Any] = True , _UpperCAmelCase : Any = None , _UpperCAmelCase : Any = None , _UpperCAmelCase : Union[str, Any] = True , **_UpperCAmelCase : str , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"height": 384, "width": 384}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ = do_convert_rgb
def lowercase__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] = PILImageResampling.BICUBIC , _UpperCAmelCase : List[str] = None , **_UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
UpperCAmelCase_ = (size["height"], size["width"])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any = None , **_UpperCAmelCase : Tuple , ) -> str:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict = None , _UpperCAmelCase : List[Any] = None , _UpperCAmelCase : Tuple = None , _UpperCAmelCase : str = None , _UpperCAmelCase : Dict = None , _UpperCAmelCase : List[str] = None , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : Any = None , _UpperCAmelCase : str = None , _UpperCAmelCase : Tuple = None , _UpperCAmelCase : Any = ChannelDimension.FIRST , **_UpperCAmelCase : str , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
UpperCAmelCase_ = BatchFeature(data={"pixel_values": images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
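# Usage sketch (hypothetical class name -- the class header sits outside this
# fragment; BLIP-style processors with OPENAI_CLIP mean/std are driven like this):
#
#     processor = BlipImageProcessor(size={"height": 384, "width": 384})
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # (1, 3, 384, 384)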
| 712 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    # Brute-force a Caesar cipher: try all 26 shifts and print each candidate.
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                # Non-letters (spaces, punctuation) pass through unchanged.
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
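# Quick non-interactive check (a sketch): "KHOOR" is "HELLO" shifted by 3,
# so the Key #3 line of the brute force below should read "HELLO".
if __name__ == "__main__":
    decrypt("KHOOR")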
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 14 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if the regexes in `qs` match some window of the key tuple `ks`."""
    # compile regexes and force a complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # try to match the regexes against a window of ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Start every leaf as _unmatched, then substitute the first matching rule.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
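# Minimal usage sketch (assumes flax plus an older jax that still exports
# PartitionSpec from jax.experimental; the toy tree below stands in for real
# FlaxGPT2 params):
if __name__ == "__main__":
    toy_params = {
        "transformer": {
            "wte": {"embedding": [[0.0]]},
            "ln_f": {"bias": [0.0], "scale": [1.0]},
        }
    }
    print(set_partitions(toy_params))  # embedding -> P("mp", None); ln_f leaves -> None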
| 713 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # The backbone's final channel count scales with the width multiplier.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We verify the integration results on a fixture image of cats.
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 14 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
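# Usage sketch via the public API (assumes a transformers release that ships DETA):
#
#     from transformers import DetaConfig
#     config = DetaConfig()
#     config.hidden_size, config.num_attention_heads  # (256, 8), via attribute_map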
| 714 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort: merge runs of doubling width `p`."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
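# Quick non-interactive checks (sketch; expected outputs computed by hand):
if __name__ == "__main__":
    assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
    assert iter_merge_sort([]) == []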
if __name__ == "__main__":
lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
lowerCamelCase = []
else:
lowerCamelCase = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 14 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two units in ENERGY_CONVERSION, pivoting through joules."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
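# Quick checks (sketch; both follow directly from the conversion table above):
if __name__ == "__main__":
    assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0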
| 14 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility: inside `as_target_processor`, forward to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence like `<s_key>value</s_key>` into a JSON-style dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
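# Sketch of token2json on a synthetic Donut-style sequence (public API; the
# checkpoint name is a known Donut checkpoint, and the token string is made up):
#
#     from transformers import DonutProcessor
#     processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
#     processor.token2json("<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>")
#     # -> {"menu": {"name": "latte", "price": "4.50"}}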
| 716 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    # Normalize the input to a batch of videos, i.e. a list of lists of frames.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Either scale the shortest edge or resize to an exact (height, width).
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        # Note the parentheses: without them, `resample is None` alone would
        # raise even when `do_resize` is False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
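# Usage sketch via the public API (assumes `transformers` is installed; the frame
# sizes are arbitrary):
#
#     import numpy as np
#     from transformers import VideoMAEImageProcessor
#
#     video = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(8)]
#     out = VideoMAEImageProcessor()(video, return_tensors="np")
#     out["pixel_values"].shape  # (1, 8, 3, 224, 224)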
| 14 | 0 |
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCamelCase = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter out members whose paths or links escape the extraction directory.
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
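# Usage sketch (paths are hypothetical; this module lives inside the `datasets`
# package, so it is driven through ExtractManager rather than run directly):
#
#     manager = ExtractManager(cache_dir="/tmp/datasets_cache")
#     extracted_path = manager.extract("/tmp/archive.tar.gz")
#
# `Extractor.infer_extractor_format` sniffs magic numbers, so file extensions
# are never trusted.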
| 717 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 14 | 0 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 718 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    # Shortest path on a binary grid: cells equal to 1 are walkable, all moves cost 1.
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
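# Quick check (sketch): shortest path across a 2x2 grid of 1s without diagonals.
if __name__ == "__main__":
    demo_grid = np.array([[1, 1], [1, 1]])
    dist, path = dijkstra(demo_grid, (0, 0), (1, 1), allow_diagonal=False)
    print(dist, path)  # 2.0 [(0, 0), (0, 1), (1, 1)]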
| 14 | 0 |
"""simple docstring"""
def hex_to_bin(hex_num: str) -> int:
    # Convert a hexadecimal string (optionally signed) to its binary digits as an int.
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
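# Quick checks (sketch; verified by hand: 0xAC == 0b10101100, 0x1A == 0b11010):
if __name__ == "__main__":
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-1a") == -11010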
| 719 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance( x , y , max_step ):
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance, 1, 1 ) )
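# For instance, get_color_coded_rgb(0.5) maps hue 0.5 to cyan, (0, 255, 255),
# while a distance of exactly 1 (a point assumed inside the set) stays black.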
def get_image( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    img = Image.new("RGB" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 14 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = 'fnet'
def __init__( self : Tuple , _UpperCAmelCase : List[str]=32000 , _UpperCAmelCase : int=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : List[Any]=3072 , _UpperCAmelCase : Tuple="gelu_new" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Union[str, Any]=512 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Dict=1e-12 , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : int=512 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : Optional[int] , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = use_tpu_fourier_optimizations
UpperCAmelCase_ = tpu_short_seq_length
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCamelCase = datasets.utils.logging.get_logger(__name__)
class lowercase__ ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = None
class lowercase__ ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
UpperCamelCase = datasets.Audio()
UpperCamelCase = "audio"
UpperCamelCase = AudioFolderConfig
UpperCamelCase = 42 # definition at the bottom of the script
UpperCamelCase = AudioClassification(audio_column='''audio''' , label_column='''label''' )
lowerCamelCase = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
lowerCamelCase = AUDIO_EXTENSIONS
| 721 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime( number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
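# Quick sanity checks (sketch): is_prime(2) and is_prime(3) are True, while
# is_prime(5777) is False since 5777 == 53 * 109.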
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums( n ):
    if not isinstance(n , int ):
        raise ValueError("n must be an integer" )
    if n <= 0:
        raise ValueError("n must be >= 0" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            candidate = odd_composites[num] - 2 * i * i
            if is_prime(candidate ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("""T""")
class GraphAdjacencyList( Generic[T] ):
    '''simple docstring'''

    def __init__( self , directed : bool = True ) -> None:
        '''simple docstring'''
        self.adj_list = {}  # dictionary of lists
        self.directed = directed

    def add_edge( self , source_vertex : T , destination_vertex : T ) -> "GraphAdjacencyList[T]":
        '''simple docstring'''
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as its first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__( self ) -> str:
        '''simple docstring'''
        return pformat(self.adj_list )
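# Illustrative usage (sketch):
# graph = GraphAdjacencyList[int](directed=False)
# graph.add_edge(0, 1).add_edge(1, 2)
# print(graph)  # -> {0: [1], 1: [0, 2], 2: [1]}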
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''convbert'''
def __init__( self : Any , _UpperCAmelCase : Optional[int]=30522 , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Any=3072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1e-12 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : str=9 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = head_ratio
UpperCAmelCase_ = conv_kernel_size
UpperCAmelCase_ = num_groups
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
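# The dynamic-axis mappings above tell the ONNX exporter which input dimensions
# may vary at inference time, e.g. {0: "batch", 1: "sequence"} keeps both the
# batch size and the sequence length flexible instead of baking them into the
# exported graph.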
| 14 | 0 |
"""simple docstring"""
import numpy as np
class Cell:
    def __init__( self ) -> None:
        '''simple docstring'''
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__( self , cell ) -> bool:
        '''simple docstring'''
        return self.position == cell.position

    def showcell( self ) -> None:
        '''simple docstring'''
        print(self.position )


class Gridworld:
    def __init__( self , world_size=(5, 5) ) -> None:
        '''simple docstring'''
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show( self ) -> None:
        '''simple docstring'''
        print(self.w )

    def get_neighbours( self , cell ) -> list:
        '''simple docstring'''
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours


def astar( world , start , goal ):
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neighbours(current ):
            # skip neighbours that were already expanded
            if any(c == n for c in _closed ):
                continue
            n.g = current.g + 1
            xa , ya = n.position
            xb , yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # skip neighbours that are already queued with a better score
            if any(c == n and c.f <= n.f for c in _open ):
                continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
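# Note: the squared-distance heuristic above is not admissible for unit step
# costs, so the search behaves more like greedy best-first than a strictly
# optimal A*; it still recovers a valid path on open grids such as the demo
# below.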
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
# Just for visual reasons.
for i in s:
        world.w[i] = 1
print(world.w)
| 701 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''mobilenet_v1'''
def __init__( self : Tuple , _UpperCAmelCase : int=3 , _UpperCAmelCase : Union[str, Any]=224 , _UpperCAmelCase : Any=1.0 , _UpperCAmelCase : Any=8 , _UpperCAmelCase : List[Any]="relu6" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Dict=0.999 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[Any]=0.001 , **_UpperCAmelCase : str , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = depth_multiplier
UpperCAmelCase_ = min_depth
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = tf_padding
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def lowercase__ ( self : Tuple ) -> float:
'''simple docstring'''
return 1e-4
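# The 1e-4 returned above is the absolute tolerance used when the exported
# ONNX graph is validated against the original PyTorch outputs.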
| 14 | 0 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="attention" ):
UpperCAmelCase_ = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
UpperCAmelCase_ = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
UpperCAmelCase_ = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
UpperCAmelCase_ = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
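# For illustration, with prefix="encoder", i=0 and layer_name="attention" the
# helper above reads flattened keys such as
# "encoder/layers_0/attention/key/kernel".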
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
if split_mlp_wi:
UpperCAmelCase_ = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
UpperCAmelCase_ = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
UpperCAmelCase_ = (wi_a, wi_a)
else:
UpperCAmelCase_ = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
UpperCAmelCase_ = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def a__ ( lowerCAmelCase__ , *, lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = traverse_util.flatten_dict(variables["target"] )
    UpperCAmelCase_ = {'/'.join(k ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase_ = 'encoder/layers_0/mlp/wi_0/kernel' in old
print("Split MLP:" , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase_ = old['token_embedder/embedding']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE_ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "encoder" , "pre_attention_layer_norm" )
UpperCAmelCase_ = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "encoder" , "attention" )
UpperCAmelCase_ = layer_norm
UpperCAmelCase_ = k.T
UpperCAmelCase_ = o.T
UpperCAmelCase_ = q.T
UpperCAmelCase_ = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "encoder" , "pre_mlp_layer_norm" )
UpperCAmelCase_ = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "encoder" , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = layer_norm
if split_mlp_wi:
UpperCAmelCase_ = wi[0].T
UpperCAmelCase_ = wi[1].T
else:
UpperCAmelCase_ = wi.T
UpperCAmelCase_ = wo.T
UpperCAmelCase_ = old[
'encoder/relpos_bias/rel_embedding'
].T
UpperCAmelCase_ = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE_ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "decoder" , "pre_self_attention_layer_norm" )
UpperCAmelCase_ = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "decoder" , "self_attention" )
UpperCAmelCase_ = layer_norm
UpperCAmelCase_ = k.T
UpperCAmelCase_ = o.T
UpperCAmelCase_ = q.T
UpperCAmelCase_ = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "decoder" , "pre_cross_attention_layer_norm" )
UpperCAmelCase_ = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "decoder" , "encoder_decoder_attention" )
UpperCAmelCase_ = layer_norm
UpperCAmelCase_ = k.T
UpperCAmelCase_ = o.T
UpperCAmelCase_ = q.T
UpperCAmelCase_ = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "decoder" , "pre_mlp_layer_norm" )
UpperCAmelCase_ = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "decoder" , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = layer_norm
if split_mlp_wi:
UpperCAmelCase_ = wi[0].T
UpperCAmelCase_ = wi[1].T
else:
UpperCAmelCase_ = wi.T
UpperCAmelCase_ = wo.T
UpperCAmelCase_ = old['decoder/decoder_norm/scale']
UpperCAmelCase_ = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase_ = old['decoder/logits_dense/kernel'].T
return new
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase_ = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase_ = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
UpperCAmelCase_ = state_dict['shared.weight']
return state_dict
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = convert_tax_to_pytorch(SCREAMING_SNAKE_CASE_ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = make_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
UpperCAmelCase_ = TaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase_ = TaEncoderModel(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_ = TaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE_ )
print("Done" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
lowerCamelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 702 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for attribute in key.split("." ):
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
elif weight_type == "running_mean":
UpperCAmelCase_ = value
elif weight_type == "running_var":
UpperCAmelCase_ = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase_ = value
elif weight_type == "inv_freq":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
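# For example, a fairseq tensor under "self_attn.linear_q" lands on the
# matching "encoder.layers.*.self_attn.linear_q" path of the HF model via the
# MAPPING table above (illustrative; exact paths depend on the checkpoint).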
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ = fairseq_model.state_dict()
UpperCAmelCase_ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase_ = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase_ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase_ = True
if "*" in mapped_key:
UpperCAmelCase_ = name.split(lowerCAmelCase__ )[0].split("." )[-2]
UpperCAmelCase_ = mapped_key.replace("*" , lowerCAmelCase__ )
if "pos_bias_u" in name:
UpperCAmelCase_ = None
elif "pos_bias_v" in name:
UpperCAmelCase_ = None
elif "weight_g" in name:
UpperCAmelCase_ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ = "weight_v"
elif "bias" in name:
UpperCAmelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase_ = "weight"
elif "running_mean" in name:
UpperCAmelCase_ = "running_mean"
elif "inv_freq" in name:
UpperCAmelCase_ = "inv_freq"
elif "running_var" in name:
UpperCAmelCase_ = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase_ = "num_batches_tracked"
else:
UpperCAmelCase_ = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = full_name.split("conv_layers." )[-1]
UpperCAmelCase_ = name.split("." )
UpperCAmelCase_ = int(items[0] )
UpperCAmelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ):
if config_path is not None:
UpperCAmelCase_ = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase__ , hidden_act="swish" )
else:
UpperCAmelCase_ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCAmelCase_ = "rotary"
if is_finetuned:
if dict_path:
UpperCAmelCase_ = Dictionary.load(lowerCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase_ = target_dict.pad_index
UpperCAmelCase_ = target_dict.bos_index
UpperCAmelCase_ = target_dict.eos_index
UpperCAmelCase_ = len(target_dict.symbols )
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , "vocab.json" )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase__ ) )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
UpperCAmelCase_ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = WavaVecaCTCTokenizer(
lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCAmelCase__ , )
UpperCAmelCase_ = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
UpperCAmelCase_ = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ = WavaVecaConformerForCTC(lowerCAmelCase__ )
else:
UpperCAmelCase_ = WavaVecaConformerForPreTraining(lowerCAmelCase__ )
if is_finetuned:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase_ = argparse.Namespace(task="audio_pretraining" )
UpperCAmelCase_ = fairseq.tasks.setup_task(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase__ )
UpperCAmelCase_ = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 14 | 0 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCamelCase = concatenate_datasets
lowerCamelCase = DownloadConfig
lowerCamelCase = DownloadManager
lowerCamelCase = DownloadMode
lowerCamelCase = DownloadConfig
lowerCamelCase = DownloadMode
lowerCamelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 703 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort( my_list ):
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
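    # additional sanity check (sketch): any integer list round-trips through
    # the sort
    assert bucket_sort(list(range(-5, 5))) == sorted(range(-5, 5))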
| 14 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""PerceiverFeatureExtractor"""]
lowerCamelCase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase__ ( __a ):
'''simple docstring'''
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[Any] , _UpperCAmelCase : Tuple=58101 , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=1024 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Any=4096 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=4096 , _UpperCAmelCase : int=16 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : str=1024 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Tuple=58100 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Dict=58100 , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : int=0 , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : str , ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = decoder_vocab_size or vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase_ = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
class lowercase__ ( __a ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase_ = {0: "batch"}
UpperCAmelCase_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(snake_case__ ):
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
else:
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super().outputs
else:
UpperCAmelCase_ = super(snake_case__ , self ).outputs
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(snake_case__ ):
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowercase__ ( self : int , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Generate decoder inputs
UpperCAmelCase_ = seq_length if not self.use_past else 1
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase_ = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase_ = dict(**snake_case__ , **snake_case__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs["input_ids"].shape
UpperCAmelCase_ = common_inputs["decoder_input_ids"].shape[1]
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = decoder_seq_length + 3
UpperCAmelCase_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(snake_case__ , snake_case__ )] , dim=1 )
UpperCAmelCase_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ = min(snake_case__ , snake_case__ )
UpperCAmelCase_ = max(snake_case__ , snake_case__ ) - min_num_layers
UpperCAmelCase_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(snake_case__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
) )
# TODO: test this.
UpperCAmelCase_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(snake_case__ , snake_case__ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) )
return common_inputs
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase_ = seqlen + 2
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = common_inputs["attention_mask"].dtype
UpperCAmelCase_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
UpperCAmelCase_ = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(snake_case__ )
]
return common_inputs
def lowercase__ ( self : List[Any] , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = tokenizer.num_special_tokens_to_add(snake_case__ )
UpperCAmelCase_ = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase_ = dict(tokenizer(snake_case__ , return_tensors=snake_case__ ) )
return common_inputs
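    # With the default -1 sizes, the helper above yields a fixed 2 x 8 batch of
    # unk tokens (OnnxConfig.default_fixed_batch / default_fixed_sequence),
    # which is enough for shape-dynamic tracing.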
def lowercase__ ( self : int , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> str:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
else:
UpperCAmelCase_ = self._generate_dummy_inputs_for_causal_lm(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
return common_inputs
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super()._flatten_past_key_values_(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
UpperCAmelCase_ = super(snake_case__ , self )._flatten_past_key_values_(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
@property
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return 1e-4
| 705 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ , UpperCAmelCase_ = create_model(
"HTSAT-tiny" , "roberta" , lowerCAmelCase__ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=lowerCAmelCase__ , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = r".*sequential.(\d+).*"
UpperCAmelCase_ = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase_ = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
# replace sequential layers with list
UpperCAmelCase_ = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 )
UpperCAmelCase_ = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(lowerCAmelCase__ )//3}.linear.""" )
elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase_ = 1 if projecton_layer == 0 else 2
UpperCAmelCase_ = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase_ = value
UpperCAmelCase_ = mixed_qkv.size(0 ) // 3
UpperCAmelCase_ = mixed_qkv[:qkv_dim]
UpperCAmelCase_ = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase_ = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase_ = query_layer
UpperCAmelCase_ = key_layer
UpperCAmelCase_ = value_layer
else:
UpperCAmelCase_ = value
return model_state_dict
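# For example, a fused qkv projection weight of shape (3 * dim, dim) is sliced
# by the branch above into equal query / key / value blocks of shape (dim, dim).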
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ , UpperCAmelCase_ = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ )
clap_model.eval()
UpperCAmelCase_ = clap_model.state_dict()
UpperCAmelCase_ = rename_state_dict(lowerCAmelCase__ )
UpperCAmelCase_ = ClapConfig()
UpperCAmelCase_ = enable_fusion
UpperCAmelCase_ = ClapModel(lowerCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
transformers_config.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 14 | 0 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCamelCase = """<<<<<<< This should probably be modified because it mentions: """
lowerCamelCase = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value(\'\1\')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value(\'string\')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value(\'string\'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def a__ ( lowerCAmelCase__ ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
@staticmethod
def lowercase__ ( _UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=a_ , required=a_ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=a_ , required=a_ , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=a_ )
def __init__( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = get_logger("datasets-cli/converting" )
UpperCAmelCase_ = tfds_path
UpperCAmelCase_ = datasets_directory
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
UpperCAmelCase_ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
UpperCAmelCase_ = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
UpperCAmelCase_ = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
if os.path.isdir(self._tfds_path ):
UpperCAmelCase_ = os.listdir(a_ )
else:
UpperCAmelCase_ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
UpperCAmelCase_ = os.path.join(a_ , a_ )
UpperCAmelCase_ = os.path.join(a_ , a_ )
if not os.path.isfile(a_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(a_ , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = []
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = []
for line in lines:
UpperCAmelCase_ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
UpperCAmelCase_ = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
UpperCAmelCase_ = ""
continue
elif "from absl import logging" in out_line:
UpperCAmelCase_ = "from datasets import logging\n"
elif "getLogger" in out_line:
UpperCAmelCase_ = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
UpperCAmelCase_ = True
UpperCAmelCase_ = list(filter(lambda _UpperCAmelCase : e in out_line , a_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(a_ ) + "\n" )
out_lines.append(a_ )
out_lines.append(a_ )
continue
else:
for pattern, replacement in TO_CONVERT:
UpperCAmelCase_ = re.sub(a_ , a_ , a_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
UpperCAmelCase_ = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , a_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
UpperCAmelCase_ = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
UpperCAmelCase_ = True
out_lines.append(a_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
UpperCAmelCase_ = f_name.replace(".py" , "" )
UpperCAmelCase_ = os.path.join(a_ , a_ )
UpperCAmelCase_ = os.path.join(a_ , a_ )
os.makedirs(a_ , exist_ok=a_ )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(a_ )
if needs_manual_update:
with_manual_update.append(a_ )
with open(a_ , "w" , encoding="utf-8" ) as f:
f.writelines(a_ )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
UpperCAmelCase_ = os.path.basename(a_ )
UpperCAmelCase_ = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(a_ , a_ )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 706 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
if not head:
return True
# split the list to two parts
UpperCAmelCase_ , UpperCAmelCase_ = head.next, head
while fast and fast.next:
UpperCAmelCase_ = fast.next.next
UpperCAmelCase_ = slow.next
UpperCAmelCase_ = slow.next
    UpperCAmelCase_ = None  # sever the first half here; the compare loop below terminates even without this
# reverse the second part
UpperCAmelCase_ = None
while second:
UpperCAmelCase_ = second.next
UpperCAmelCase_ = node
UpperCAmelCase_ = second
UpperCAmelCase_ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase_ = node.next
UpperCAmelCase_ = head.next
return True
def a__ ( lowerCAmelCase__ ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ = [slow.val]
while slow.next:
UpperCAmelCase_ = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ = cur.next
return True
def a__ ( lowerCAmelCase__ ):
if not head or not head.next:
return True
UpperCAmelCase_ = {}
UpperCAmelCase_ = 0
while head:
if head.val in d:
d[head.val].append(lowerCAmelCase__ )
else:
UpperCAmelCase_ = [pos]
UpperCAmelCase_ = head.next
pos += 1
UpperCAmelCase_ = pos - 1
UpperCAmelCase_ = 0
for v in d.values():
if len(lowerCAmelCase__ ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ = 0
for i in range(0 , len(lowerCAmelCase__ ) ):
if v[i] + v[len(lowerCAmelCase__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
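# Illustrative sketch (not in the original file): the stack-based technique
# restated on a minimal node class. The class and helper names are made up;
# the original helpers all share the name a__, so only the last definition
# (the dictionary-based variant) survives at module level.
class _Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt
def _demo_is_palindrome():
    head = _Node(1, _Node(2, _Node(2, _Node(1))))  # 1 -> 2 -> 2 -> 1
    slow = fast = head
    first_half = []
    while fast and fast.next:
        first_half.append(slow.val)
        slow, fast = slow.next, fast.next.next
    if fast:  # odd length: skip the middle node
        slow = slow.next
    while slow:
        if first_half.pop() != slow.val:
            return False
        slow = slow.next
    return True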
| 14 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
UpperCAmelCase_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
UpperCAmelCase_ = f"""{src_lang}-{tgt_lang}"""
UpperCAmelCase_ = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(_snake_case , exist_ok=_snake_case )
UpperCAmelCase_ = os.path.join(_snake_case , "README.md" )
print(f"""Generating {path}""" )
with open(_snake_case , "w" , encoding="utf-8" ) as f:
f.write(_snake_case )
# make sure we are under the root of the project
lowerCamelCase = Path(__file__).resolve().parent.parent.parent
lowerCamelCase = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase , lowerCamelCase , lowerCamelCase = model_name.split("""-""")
lowerCamelCase = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 707 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCAmelCase_ = MaskFormerConfig(backbone_config=lowerCAmelCase__ )
UpperCAmelCase_ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase_ = 847
UpperCAmelCase_ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCAmelCase_ = 150
UpperCAmelCase_ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase_ = 171
UpperCAmelCase_ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCAmelCase_ = 133
UpperCAmelCase_ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase_ = 19
UpperCAmelCase_ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase_ = 65
UpperCAmelCase_ = "mapillary-vistas-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
return config
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dct.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:dim, :]
UpperCAmelCase_ = in_proj_bias[: dim]
UpperCAmelCase_ = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ = in_proj_weight[
-dim :, :
]
UpperCAmelCase_ = in_proj_bias[-dim :]
# fmt: on
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
# fmt: off
UpperCAmelCase_ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# fmt: on
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
UpperCAmelCase_ = get_maskformer_config(lowerCAmelCase__ )
# load original state_dict
with open(lowerCAmelCase__ , "rb" ) as f:
UpperCAmelCase_ = pickle.load(lowerCAmelCase__ )
UpperCAmelCase_ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCAmelCase_ = create_rename_keys(lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_swin_q_k_v(lowerCAmelCase__ , config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# update to torch tensors
for key, value in state_dict.items():
UpperCAmelCase_ = torch.from_numpy(lowerCAmelCase__ )
# load 🤗 model
UpperCAmelCase_ = MaskFormerForInstanceSegmentation(lowerCAmelCase__ )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase__ , param.shape )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCAmelCase__ ) == 0, f"""Unexpected keys: {unexpected_keys}"""
# verify results
UpperCAmelCase_ = prepare_img()
if "vistas" in model_name:
UpperCAmelCase_ = 65
elif "cityscapes" in model_name:
UpperCAmelCase_ = 65535
else:
UpperCAmelCase_ = 255
UpperCAmelCase_ = True if "ade" in model_name else False
UpperCAmelCase_ = MaskFormerImageProcessor(ignore_index=lowerCAmelCase__ , reduce_labels=lowerCAmelCase__ )
UpperCAmelCase_ = image_processor(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase_ = model(**lowerCAmelCase__ )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCAmelCase_ = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(f"""nielsr/{model_name}""" )
image_processor.push_to_hub(f"""nielsr/{model_name}""" )
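# Illustrative sketch (not part of the conversion script): running an
# already-converted checkpoint end to end; the hub id below is an example.
def _demo_semantic_segmentation():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = MaskFormerImageProcessor()
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-tiny-ade")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # one (height, width) map of class ids per input image
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]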
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 0 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A = True
except ImportError:
A = False
A = logging.get_logger(__name__) # pylint: disable=invalid-name
def a__ ( lowerCAmelCase__ ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowercase__ ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def lowercase__ ( _UpperCAmelCase : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=_SCREAMING_SNAKE_CASE , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=_SCREAMING_SNAKE_CASE , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any]=None , *_UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = testing
UpperCAmelCase_ = testing_file
UpperCAmelCase_ = path
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
UpperCAmelCase_ = (
Path(_SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase_ = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(_SCREAMING_SNAKE_CASE ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
UpperCAmelCase_ = json.load(_SCREAMING_SNAKE_CASE )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_SCREAMING_SNAKE_CASE , extra_context=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
UpperCAmelCase_ = json.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = configuration["lowercase_modelname"]
UpperCAmelCase_ = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(F"""{directory}/configuration.json""" )
UpperCAmelCase_ = "PyTorch" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = "Flax" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=_SCREAMING_SNAKE_CASE )
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , "w" ):
pass
shutil.move(
F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(_UpperCAmelCase : str ):
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
UpperCAmelCase_ = f.readlines()
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(_SCREAMING_SNAKE_CASE )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(_UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ):
# Create temp file
UpperCAmelCase_ , UpperCAmelCase_ = mkstemp()
UpperCAmelCase_ = False
with fdopen(_SCREAMING_SNAKE_CASE , "w" ) as new_file:
with open(_SCREAMING_SNAKE_CASE ) as old_file:
for line in old_file:
new_file.write(_SCREAMING_SNAKE_CASE )
if line_to_copy_below in line:
UpperCAmelCase_ = True
for line_to_copy in lines_to_copy:
new_file.write(_SCREAMING_SNAKE_CASE )
if not line_found:
raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Remove original file
remove(_SCREAMING_SNAKE_CASE )
# Move new file
move(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def skip_units(_UpperCAmelCase : Any ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(_UpperCAmelCase : Optional[Any] ):
with open(_SCREAMING_SNAKE_CASE ) as datafile:
UpperCAmelCase_ = []
UpperCAmelCase_ = False
UpperCAmelCase_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(_SCREAMING_SNAKE_CASE )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(_SCREAMING_SNAKE_CASE )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase_ = []
elif "##" not in line:
lines_to_copy.append(_SCREAMING_SNAKE_CASE )
remove(_SCREAMING_SNAKE_CASE )
replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(_SCREAMING_SNAKE_CASE )
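        # Illustrative note (not from the original command): replace_in_files
        # expects to_replace_<model>.py to be organised in marker blocks such as
        #
        #   # To replace in: "src/transformers/models/auto/configuration_auto.py"
        #   # Below: "# Add configs here"
        #   ("brand_new_bert", "BrandNewBertConfig"),
        #   # End.
        #
        # Every plain line between "Below:" and "# End." is copied into the
        # quoted target file right under the anchor line; the file path and
        # anchor text in this example are made up.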
| 708 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCamelCase = 50_003
lowerCamelCase = 50_002
@require_sentencepiece
@require_tokenizers
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PLBartTokenizer
UpperCamelCase = None
UpperCamelCase = False
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 4 , _UpperCAmelCase )]
self.assertListEqual(_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="multi" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 7 , _UpperCAmelCase )]
self.assertListEqual(
_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = '''uclanlp/plbart-python-en_XX'''
UpperCamelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCamelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCamelCase = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
UpperCAmelCase_ = 1
return cls
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , _UpperCAmelCase )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
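# Illustrative sketch (not part of the test suite): PLBart builds decoder
# inputs the mBART way: the final non-pad token (the language code) rotates
# to the front. A pure-Python rendition with made-up ids:
def _demo_shift_tokens_right():
    seq = [150, 242, 2, 50003]  # tokens, EOS, then the language code
    shifted = [seq[-1]] + seq[:-1]
    assert shifted == [50003, 150, 242, 2]
    return shifted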
| 14 | 0 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 ):
UpperCAmelCase_ = end or len(A_ )
for i in range(A_ , A_ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
UpperCAmelCase_ = array[temp_index - 1]
temp_index -= 1
UpperCAmelCase_ = temp_index_value
return array
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): # Max Heap
UpperCAmelCase_ = index
UpperCAmelCase_ = 2 * index + 1 # Left Node
UpperCAmelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
UpperCAmelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
UpperCAmelCase_ = right_index
if largest != index:
UpperCAmelCase_ , UpperCAmelCase_ = array[largest], array[index]
heapify(A_ , A_ , A_ )
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = len(A_ )
for i in range(n // 2 , -1 , -1 ):
heapify(A_ , A_ , A_ )
for i in range(n - 1 , 0 , -1 ):
UpperCAmelCase_ , UpperCAmelCase_ = array[0], array[i]
heapify(A_ , 0 , A_ )
return array
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = low
UpperCAmelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
UpperCAmelCase_ , UpperCAmelCase_ = array[j], array[i]
i += 1
def a__ ( lowerCAmelCase__ ):
if len(A_ ) == 0:
return array
UpperCAmelCase_ = 2 * math.ceil(math.loga(len(A_ ) ) )
UpperCAmelCase_ = 16
return intro_sort(A_ , 0 , len(A_ ) , A_ , A_ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(A_ )
max_depth -= 1
UpperCAmelCase_ = median_of_a(A_ , A_ , start + ((end - start) // 2) + 1 , end - 1 )
UpperCAmelCase_ = partition(A_ , A_ , A_ , A_ )
intro_sort(A_ , A_ , A_ , A_ , A_ )
UpperCAmelCase_ = p
return insertion_sort(A_ , A_ , A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase = input("""Enter numbers separated by a comma : """).strip()
lowerCamelCase = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
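# Illustrative note (not in the original file): the two introsort cut-offs in
# numbers. For n = 1000 elements the recursion budget is 2 * ceil(log2(1000)) = 20
# levels of quicksort; a slice smaller than size_threshold = 16 is finished by
# insertion sort, and a slice that exhausts the depth budget falls back to heap
# sort, which keeps the worst case at O(n log n).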
| 709 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit_text_model'''
def __init__( self : List[Any] , _UpperCAmelCase : str=49408 , _UpperCAmelCase : str=512 , _UpperCAmelCase : Optional[Any]=2048 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=8 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : List[str]="quick_gelu" , _UpperCAmelCase : Dict=1e-5 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[int]=1.0 , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : Dict=49406 , _UpperCAmelCase : Union[str, Any]=49407 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit_vision_model'''
def __init__( self : str , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : Optional[Any]=3072 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Dict="quick_gelu" , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=1.0 , **_UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : Any , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit'''
UpperCamelCase = True
def __init__( self : Tuple , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Any=2.6592 , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if text_config is None:
UpperCAmelCase_ = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
UpperCAmelCase_ = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
UpperCAmelCase_ = OwlViTTextConfig(**_UpperCAmelCase )
UpperCAmelCase_ = OwlViTVisionConfig(**_UpperCAmelCase )
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = logit_scale_init_value
UpperCAmelCase_ = return_dict
UpperCAmelCase_ = 1.0
@classmethod
def lowercase__ ( cls : Dict , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Tuple ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowercase__ ( cls : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = text_config
UpperCAmelCase_ = vision_config
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-4
def lowercase__ ( self : List[str] , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , framework=_UpperCAmelCase )
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.image_processor , batch_size=_UpperCAmelCase , framework=_UpperCAmelCase )
return {**text_input_dict, **image_input_dict}
@property
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
return 14
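# ONNX export sketch (comment added for clarity, not in the original file): the
# config above declares dynamic batch/sequence axes for the text inputs and
# dynamic channel/spatial axes for pixel_values, builds dummy text and image
# inputs through the processor, and validates the exported graph with an
# absolute tolerance of 1e-4 at the default opset 14.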
| 14 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class lowercase__ ( snake_case__ ):
'''simple docstring'''
UpperCamelCase = '''codegen'''
UpperCamelCase = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : int , _UpperCAmelCase : List[str]=50400 , _UpperCAmelCase : Tuple=2048 , _UpperCAmelCase : Any=2048 , _UpperCAmelCase : Union[str, Any]=4096 , _UpperCAmelCase : str=28 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : int=None , _UpperCAmelCase : List[str]="gelu_new" , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : List[str]=1e-5 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=50256 , _UpperCAmelCase : str=50256 , _UpperCAmelCase : List[str]=False , **_UpperCAmelCase : Optional[int] , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = n_ctx
UpperCAmelCase_ = n_positions
UpperCAmelCase_ = n_embd
UpperCAmelCase_ = n_layer
UpperCAmelCase_ = n_head
UpperCAmelCase_ = n_inner
UpperCAmelCase_ = rotary_dim
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = resid_pdrop
UpperCAmelCase_ = embd_pdrop
UpperCAmelCase_ = attn_pdrop
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( snake_case__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] = "default" , _UpperCAmelCase : Union[str, Any] = None , _UpperCAmelCase : List[str] = False , ) -> Any:
'''simple docstring'''
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , "pad_token_id" , _UpperCAmelCase ):
# TODO: how to do that better?
UpperCAmelCase_ = 0
@property
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction="inputs" )
UpperCAmelCase_ = {0: "batch", 1: "past_sequence + sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
return self._config.n_layer
@property
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return self._config.n_head
def lowercase__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] = -1 , _UpperCAmelCase : Dict = -1 , _UpperCAmelCase : int = False , _UpperCAmelCase : Optional[Any] = None , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase_ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase_ = seqlen + 2
UpperCAmelCase_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase_ = [
(torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
]
UpperCAmelCase_ = common_inputs["attention_mask"]
if self.use_past:
UpperCAmelCase_ = ordered_inputs["attention_mask"].dtype
UpperCAmelCase_ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
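# Shape note (a sketch, not part of the original file): each generated past
# key/value tensor is (batch, n_head, seqlen + 2, n_embd // n_head), and the
# attention mask is extended with that many ones along dim=1 so it covers the
# fabricated past positions.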
@property
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return 13
| 710 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = XLMProphetNetTokenizer
UpperCamelCase = False
UpperCamelCase = True
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "[PAD]"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCAmelCase ) , 1012 )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
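# Note (added for clarity): the -9 entries above are a sentinel for pieces
# missing from the toy SentencePiece vocab; after adding fairseq_offset they
# resolve to the unknown token, which is why "9" and "é" come back as "[UNK]"
# in the decoded pieces below.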
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [35389, 6672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 14 | 0 |
"""simple docstring"""
from torch import nn
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ = class_size
UpperCAmelCase_ = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
UpperCAmelCase_ = nn.Linear(embed_size , class_size )
def lowercase__ ( self : str , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.mlp(_UpperCAmelCase )
return logits
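# Usage sketch (illustrative names, not from the original source): the head
# maps a pooled hidden state of width embed_size to class_size logits, e.g.
#   head = ClassificationHead(class_size=2, embed_size=768)
#   logits = head(hidden)  # hidden: (batch, 768) -> logits: (batch, 2)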
| 711 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : str , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : float , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
UpperCAmelCase_ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ = int(shortest_edge / crop_pct )
UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
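# Worked example of the branch above: shortest_edge=224 with the default
# crop_pct of 224/256 resizes the short side to int(224 / 0.875) = 256 and
# then center-crops back to 224x224; at 384 and above the image is warped
# directly to (shortest_edge, shortest_edge) with no crop.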
def lowercase__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ) -> Any:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , crop_pct=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 14 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
UpperCAmelCase_ = n - 1
UpperCAmelCase_ = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
UpperCAmelCase_ = 0
while count < prec:
UpperCAmelCase_ = random.randint(2 , n - 1 )
UpperCAmelCase_ = bin_exp_mod(a , d , n )
if b != 1:
UpperCAmelCase_ = True
for _ in range(exp ):
if b == n - 1:
UpperCAmelCase_ = False
break
UpperCAmelCase_ = b * b
b %= n
if flag:
return False
count += 1
return True
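# This is a probabilistic Miller-Rabin test: with `prec` random bases a
# composite survives with probability at most 4 ** -prec. Sanity checks
# (illustrative): 97 -> True, 100 -> False (caught early by the % 2 branch).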
if __name__ == "__main__":
lowerCamelCase = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 712 |
"""simple docstring"""
import string
def a__ ( lowerCAmelCase__ ):
for key in range(len(string.ascii_uppercase ) ):
UpperCAmelCase_ = ""
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCAmelCase_ = string.ascii_uppercase.find(symbol )
UpperCAmelCase_ = num - key
if num < 0:
UpperCAmelCase_ = num + len(string.ascii_uppercase )
UpperCAmelCase_ = translated + string.ascii_uppercase[num]
else:
UpperCAmelCase_ = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def a__ ( ):
UpperCAmelCase_ = input("Encrypted message: " )
UpperCAmelCase_ = message.upper()
decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 14 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowercase__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = FlaxAutoencoderKL
@property
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = jax.random.PRNGKey(0 )
UpperCAmelCase_ = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
| 713 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "width_multiplier" ) )
class lowercase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Any=64 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Dict="swish" , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : int=32 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=10 , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=0.25 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Optional[int]=0.0 , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = conv_kernel_size
UpperCAmelCase_ = output_stride
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = width_multiplier
UpperCAmelCase_ = ffn_dropout
UpperCAmelCase_ = attn_dropout
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def lowercase__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
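# Geometry check (sketch): with the tester defaults image_size=64 and
# output_stride=32, the final feature map asserted in create_and_check_model
# is 64 // 32 = 2 on each side.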
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaModelTester(self )
UpperCAmelCase_ = MobileViTVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ):
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_ = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MobileViTVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits.detach().cpu()
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
UpperCAmelCase_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
UpperCAmelCase_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 14 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCamelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , ):
output_path.parent.mkdir(parents=lowerCamelCase__ , exist_ok=lowerCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCamelCase__ , lowerCamelCase__ , f=output_path.as_posix() , input_names=lowerCamelCase__ , output_names=lowerCamelCase__ , dynamic_axes=lowerCamelCase__ , do_constant_folding=lowerCamelCase__ , use_external_data_format=lowerCamelCase__ , enable_onnx_checker=lowerCamelCase__ , opset_version=lowerCamelCase__ , )
else:
export(
lowerCamelCase__ , lowerCamelCase__ , f=output_path.as_posix() , input_names=lowerCamelCase__ , output_names=lowerCamelCase__ , dynamic_axes=lowerCamelCase__ , do_constant_folding=lowerCamelCase__ , opset_version=lowerCamelCase__ , )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
UpperCAmelCase_ = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = Path(lowerCamelCase__ )
# VAE DECODER
UpperCAmelCase_ = AutoencoderKL.from_pretrained(model_path + "/vae" )
UpperCAmelCase_ = vae_decoder.config.latent_channels
# forward only through the decoder part
UpperCAmelCase_ = vae_decoder.decode
onnx_export(
lowerCamelCase__ , model_args=(
torch.randn(1 , lowerCamelCase__ , 25 , 25 ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=lowerCamelCase__ , )
del vae_decoder
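# The dummy latent is (1, latent_channels, 25, 25), but all four axes are
# declared dynamic in `dynamic_axes`, so the exported decoder accepts other
# spatial sizes at inference time.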
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
lowerCamelCase = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("""SD: Done: ONNX""")
| 714 |
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ , UpperCAmelCase_ = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
input_list[low : high + 1] = result + left + right
return input_list
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) <= 1:
return input_list
UpperCAmelCase_ = list(lowerCAmelCase__ )
# iteration for two-way merging
UpperCAmelCase_ = 2
while p <= len(lowerCAmelCase__ ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = i + p - 1
UpperCAmelCase_ = (low + high + 1) // 2
UpperCAmelCase_ = merge(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# final merge of last two parts
if p * 2 >= len(lowerCAmelCase__ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = merge(lowerCAmelCase__ , 0 , lowerCAmelCase__ , len(lowerCAmelCase__ ) - 1 )
break
p *= 2
return input_list
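# Worked trace (sketch) for [5, 3, 8, 1]: the p=2 pass merges adjacent pairs
# to [3, 5, 1, 8]; since p * 2 >= len, the final merge of the two halves
# yields [1, 3, 5, 8].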
if __name__ == "__main__":
lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
lowerCamelCase = []
else:
lowerCamelCase = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 14 | 0 |
"""simple docstring"""
lowerCamelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
UpperCAmelCase_ = Stack()
UpperCAmelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(i ) )
elif i in operators:
# RULE 2
operator_stack.push(i )
elif i == ")":
# RULE 4
UpperCAmelCase_ = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operators[opr](num2 , num1 )
operand_stack.push(total )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 715 |
"""simple docstring"""
lowerCamelCase = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCAmelCase_ = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {', '.join(lowerCAmelCase__ )}"""
)
raise ValueError(lowerCAmelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
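# Example (sketch): converting 1 kilowatthour to joules multiplies by
# 3_600_000.0 / 1.0, so the function returns 3_600_000.0; unknown units raise
# a ValueError that lists the valid keys.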
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
lowerCamelCase = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = torch.load(checkpoint_path , map_location="cpu" )
return sd
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=rename_keys_prefix ):
UpperCAmelCase_ = OrderedDict()
UpperCAmelCase_ = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
UpperCAmelCase_ = key
for name_pair in rename_keys_prefix:
UpperCAmelCase_ = new_key.replace(name_pair[0] , name_pair[1] )
UpperCAmelCase_ = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
UpperCAmelCase_ = new_d["cls.predictions.bias"]
return new_d
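# Mapping sketch (comment added for clarity): keys under "detector." are
# dropped, each prefix pair in rename_keys_prefix is applied in order, and for
# old checkpoints that lack a decoder bias the existing "cls.predictions.bias"
# entry is reused.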
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
UpperCAmelCase_ = "pretraining"
if "vcr" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 512}
UpperCAmelCase_ = "multichoice"
elif "vqa_advanced" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 2048}
UpperCAmelCase_ = "vqa_advanced"
elif "vqa" in checkpoint_path:
UpperCAmelCase_ = {"visual_embedding_dim": 2048, "num_labels": 3129}
UpperCAmelCase_ = "vqa"
elif "nlvr" in checkpoint_path:
UpperCAmelCase_ = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
UpperCAmelCase_ = "nlvr"
UpperCAmelCase_ = VisualBertConfig(**config_params )
# Load State Dict
UpperCAmelCase_ = load_state_dict(checkpoint_path )
UpperCAmelCase_ = get_new_dict(state_dict , config )
if model_type == "pretraining":
UpperCAmelCase_ = VisualBertForPreTraining(config )
elif model_type == "vqa":
UpperCAmelCase_ = VisualBertForQuestionAnswering(config )
elif model_type == "nlvr":
UpperCAmelCase_ = VisualBertForVisualReasoning(config )
elif model_type == "multichoice":
UpperCAmelCase_ = VisualBertForMultipleChoice(config )
model.load_state_dict(state_dict )
# Save Checkpoints
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 716 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase__ ):
return [[videos]]
raise ValueError(f"""Could not make batched video from {videos}""" )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Any , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Tuple , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size["shortest_edge"] , default_to_square=_UpperCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size["height"], size["width"]) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : str , ) -> List[str]:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Tuple , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Any , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(_UpperCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(_UpperCAmelCase , size=_UpperCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase )
return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
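    # Hedged usage sketch (the `processor` instance and the clip shape are
    # assumptions for illustration; only the preprocess flow above comes from
    # this file):
    #
    #   clip = [np.zeros((320, 480, 3), dtype=np.uint8)] * 8   # 8 RGB frames
    #   batch = processor.preprocess([clip], return_tensors="np")
    #   batch["pixel_values"].shape  # (num_videos, num_frames, C, H, W)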
| 14 | 0 |
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None) -> None:
    require_version(deps[pkg], hint)
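# Hedged illustration (the pin "tqdm>=4.27" is an example, not one of the pins
# above): a requirement string reduces to a packaging-style version comparison,
# which is what require_version performs before raising:
#
#   require_version("tqdm>=4.27", "Try: pip install tqdm --upgrade")
#
# i.e., roughly: version.parse(installed) in SpecifierSet(">=4.27").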
| 717 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix):
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 14 | 0 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=("train",) ):
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
UpperCAmelCase_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
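# Hedged usage sketch (the file path is a placeholder): outside the fixtures
# above, the reader can be driven directly against a plain-text file holding
# one example per line.
#
#   dataset = TextDatasetReader("corpus.txt", cache_dir="/tmp/text_cache").read()
#   assert dataset.column_names == ["text"]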
| 718 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
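    # Hedged usage example (grid values are illustrative): cells equal to 1 are
    # walkable, so the shortest detour around the blocked middle row costs 4.
    example_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    print(dijkstra(example_grid, (0, 0), (2, 2), allow_diagonal=False))
    # (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])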
| 14 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
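# Hedged usage sketch: the defaults above mirror the
# microsoft/table-transformer-detection checkpoint, so instantiating the config
# with no arguments reproduces that architecture.
#
#   config = TableTransformerConfig()
#   config.d_model, config.num_queries  # (256, 100)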
| 719 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x, y, max_step):
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width=800,
    image_height=600,
    figure_center_x=-0.6,
    figure_center_y=0,
    figure_width=3.2,
    max_step=50,
    use_distance_color_coding=True,
):
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints):
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
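    # Hedged usage example: radix sort is stable and runs in O(d * (n + RADIX))
    # for d-digit keys; it only supports non-negative integers as shown here.
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
    # [2, 24, 45, 66, 75, 90, 170, 802]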
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
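# Design note: with the _LazyModule indirection above, importing the package
# stays cheap; modeling_swinv2 (and its torch dependency) is only materialized
# the first time an attribute such as Swinv2Model is actually accessed.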
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze):
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze, i, j, solutions):
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
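    # Hedged usage example (layout is illustrative): 0 marks an open cell and 1
    # a blocked cell; the solver prints the visited-path matrix on success.
    solve_maze([[0, 1, 0], [0, 0, 0], [1, 0, 0]])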
| 721 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n):
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution():
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 14 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
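# Hedged usage sketch: ConvBertConfig() reproduces the YituTech/conv-bert-base
# hyper-parameters listed above; smaller variants mainly shrink hidden_size and
# embedding_size.
#
#   config = ConvBertConfig(head_ratio=2, conv_kernel_size=9)
#   config.num_attention_heads  # 12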
| 14 | 0 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
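if __name__ == "__main__":
    # Hedged usage example (separator id 101 is an arbitrary choice for the
    # demo): token type ids alternate between 0 and 1 at every separator token.
    print(compute_token_type_ids([[101, 5, 6, 101, 7, 8]], separator_token_id=101))
    # tensor([[0, 0, 0, 1, 1, 1]])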
| 701 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
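# Hedged usage sketch: the checkpoint names encode (depth_multiplier, image_size),
# e.g. google/mobilenet_v1_0.75_192 corresponds to:
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)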
| 14 | 0 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 702 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
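# Hedged CLI sketch (paths and the script filename are placeholders):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./hf_wav2vec2_conformer \
#       --dict_path /path/to/dict.ltr.txt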
| 14 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
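# Hedged usage example (the palette values are illustrative, not ImageGPT's
# real 512-entry cluster table): each pixel maps to its nearest palette index.
#
#   toy_clusters = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0]])
#   pixels = np.array([[[250, 250, 250], [200, 10, 10]]])
#   color_quantize(pixels, toy_clusters)  # -> array([1, 2])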
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(self, image, data_format=None, **kwargs):
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 703 |
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) == 0:
return []
UpperCAmelCase_ , UpperCAmelCase_ = min(lowerCAmelCase__ ), max(lowerCAmelCase__ )
UpperCAmelCase_ = int(max_value - min_value ) + 1
UpperCAmelCase_ = [[] for _ in range(lowerCAmelCase__ )]
for i in my_list:
buckets[int(i - min_value )].append(lowerCAmelCase__ )
return [v for bucket in buckets for v in sorted(lowerCAmelCase__ )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 14 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""PerceiverFeatureExtractor"""]
lowerCamelCase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class lowercase__ ( __lowercase ):
'''simple docstring'''
UpperCamelCase = '''ibert'''
def __init__( self : Dict , _UpperCAmelCase : Union[str, Any]=30522 , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : List[str]=1e-12 , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Tuple="absolute" , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[int]="none" , **_UpperCAmelCase : Any , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = quant_mode
UpperCAmelCase_ = force_dequant
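        # Note (added, hedged): in I-BERT, `quant_mode` toggles integer-only
        # inference, while `force_dequant` names operations (e.g. "gelu",
        # "softmax", "layernorm", "nonlinear" or "none") that should fall back
        # to floating point; the exact accepted values live in the modeling code.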
class lowercase__ ( __lowercase ):
'''simple docstring'''
@property
def lowercase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 705 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path , enable_fusion=False ):
    model , model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"""_projection.{projection_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
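# Example (added): with KEYS_TO_MODIFY_MAPPING above, a checkpoint key such as
# "audio_branch.blocks.0.attn.qkv.weight" is first rewritten to
# "audio_model.audio_encoder.blocks.0.attention.self.qkv.weight", and the qkv
# split then distributes it over separate query/key/value entries.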
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 14 | 0 |
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
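    # Expected (added): dist(1, 4) == 11 via 1 -> 3 -> 4 (5 + 6) and
    # dist(0, 3) == 16 via 0 -> 2 -> 3 (9 + 7).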
| 706 |
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
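# Minimal usage sketch (added; the original file defines no node type, so
# `ListNode` below is a hypothetical stand-in exposing `.val` and `.next`).
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_linked_list(values):
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head


if __name__ == "__main__":
    print(is_palindrome(build_linked_list([1, 2, 2, 1])))  # True
    print(is_palindrome_stack(build_linked_list([1, 2, 3])))  # False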
| 14 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
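    # Both lines print 79800.0 (5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4).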
| 707 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def get_maskformer_config(model_name ):
UpperCAmelCase_ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCAmelCase_ = MaskFormerConfig(backbone_config=lowerCAmelCase__ )
UpperCAmelCase_ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase_ = 847
UpperCAmelCase_ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCAmelCase_ = 150
UpperCAmelCase_ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase_ = 171
UpperCAmelCase_ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCAmelCase_ = 133
UpperCAmelCase_ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase_ = 19
UpperCAmelCase_ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase_ = 65
UpperCAmelCase_ = "mapillary-vistas-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase_ = {int(k ): v for k, v in idalabel.items()}
return config
def create_rename_keys(config ):
UpperCAmelCase_ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
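# Usage note (added): e.g. rename_key(state_dict, "backbone.patch_embed.norm.weight",
# "model.pixel_level_module.encoder.model.embeddings.norm.weight") pops the tensor
# from the old key and re-inserts it under the new one.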
def read_in_swin_q_k_v(state_dict , backbone_config ):
UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:dim, :]
UpperCAmelCase_ = in_proj_bias[: dim]
UpperCAmelCase_ = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ = in_proj_weight[
-dim :, :
]
UpperCAmelCase_ = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v(state_dict , config ):
# fmt: off
UpperCAmelCase_ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
        UpperCAmelCase_ = in_proj_bias[: hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
        UpperCAmelCase_ = in_proj_bias[: hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img():
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_maskformer_config(model_name )
# load original state_dict
with open(lowerCAmelCase__ , "rb" ) as f:
UpperCAmelCase_ = pickle.load(lowerCAmelCase__ )
UpperCAmelCase_ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# update to torch tensors
for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
# load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase__ , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCAmelCase__ ) == 0, f"""Unexpected keys: {unexpected_keys}"""
# verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="pt" )
    outputs = model(**inputs )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(lowerCAmelCase__ )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(f"""nielsr/{model_name}""" )
image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def read(dataset , length ):
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch(dataset , length , batch_size ):
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset , length , type ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset , length , batch_size , type ):
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
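# Hedged sketch (added): `get_duration` is imported from the local benchmark
# `utils` module and is not shown here; the decorator contract assumed above --
# run the wrapped function and return its wall-clock runtime -- could look like:
#
#     import functools, time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start
#         return wrapper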
def benchmark_iterating():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
    functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , "dataset.arrow" ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + " " + " ".join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
print("shuffling dataset" )
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , "wb" ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 708 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCamelCase = 50_003
lowerCamelCase = 50_002
@require_sentencepiece
@require_tokenizers
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PLBartTokenizer
UpperCamelCase = None
UpperCamelCase = False
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 4 , _UpperCAmelCase )]
self.assertListEqual(_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="multi" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 7 , _UpperCAmelCase )]
self.assertListEqual(
_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = '''uclanlp/plbart-python-en_XX'''
UpperCamelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCamelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCamelCase = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : int ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
UpperCAmelCase_ = 1
return cls
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , _UpperCAmelCase )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
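    # Layout note (added): the assertions above encode the fairseq convention this
    # tokenizer follows -- encoder inputs and labels end with [</s>, lang_code],
    # while shift_tokens_right rotates the labels so decoder inputs start with the
    # target language code.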
@require_torch
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
| 14 | 0 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
    return expected_row_ids_and_row_dicts
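# Shape note (added): each entry pairs a synthetic row id "<partition>_<row_idx>"
# with the row rendered as a dict (for `spark.range(...)` dataframes that dict is
# just {"id": <int>}), in the partition order requested by the caller.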
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == f"""0_{i}"""
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator" ) as generator_mock:
        generator_mock.shuffle = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 709 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit_text_model'''
def __init__( self : List[Any] , _UpperCAmelCase : str=49408 , _UpperCAmelCase : str=512 , _UpperCAmelCase : Optional[Any]=2048 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=8 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : List[str]="quick_gelu" , _UpperCAmelCase : Dict=1e-5 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[int]=1.0 , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : Dict=49406 , _UpperCAmelCase : Union[str, Any]=49407 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit_vision_model'''
def __init__( self : str , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : Optional[Any]=3072 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Dict="quick_gelu" , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=1.0 , **_UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : Any , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit'''
UpperCamelCase = True
def __init__( self : Tuple , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Any=2.6592 , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if text_config is None:
UpperCAmelCase_ = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
UpperCAmelCase_ = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
UpperCAmelCase_ = OwlViTTextConfig(**_UpperCAmelCase )
UpperCAmelCase_ = OwlViTVisionConfig(**_UpperCAmelCase )
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = logit_scale_init_value
UpperCAmelCase_ = return_dict
UpperCAmelCase_ = 1.0
@classmethod
def lowercase__ ( cls : Dict , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Tuple ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowercase__ ( cls : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = text_config
UpperCAmelCase_ = vision_config
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-4
def lowercase__ ( self : List[str] , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , framework=_UpperCAmelCase )
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.image_processor , batch_size=_UpperCAmelCase , framework=_UpperCAmelCase )
return {**text_input_dict, **image_input_dict}
@property
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
return 14
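# Minimal usage sketch for the snippet above (assumes the Hugging Face `transformers`
# OwlViT classes; the class names appear mangled in this dump):
#
#   from transformers import OwlViTConfig
#   config = OwlViTConfig()                       # default text/vision sub-configs
#   assert config.projection_dim == 512
#   assert config.to_dict()["model_type"] == "owlvit"
#
# The ONNX config at the end declares input_ids/pixel_values/attention_mask as dynamic
# axes, a validation tolerance of 1e-4, and opset 14 as the minimum for export.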
| 14 | 0 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , "" )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , "" )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , noise_scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", type=str, required=True)
    parser.add_argument("""--config_path""", type=str, required=True)
    parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
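# Example invocation (the script filename and paths are illustrative):
#   python convert_original_ldm_checkpoint.py --checkpoint_path model.ckpt \
#       --config_path ldm_config.yaml --output_path ./ldm_pipeline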
| 710 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = XLMProphetNetTokenizer
UpperCamelCase = False
UpperCamelCase = True
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "[PAD]"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCAmelCase ) , 1012 )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [35389, 6672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 14 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
| 711 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : str , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : float , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
UpperCAmelCase_ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ = int(shortest_edge / crop_pct )
UpperCAmelCase_ = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
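    # Illustration of the crop_pct branch above: e.g. with size={"shortest_edge": 224} and
    # the default crop_pct of 224/256, an image is first resized so its shortest edge
    # becomes int(224 / (224 / 256)) = 256, then center-cropped to 224x224 -- the classic
    # "resize to 256, crop to 224" evaluation transform. At 384 or larger the image is
    # simply warped to a square with no crop.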
def lowercase__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ) -> Any:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : float = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , crop_pct=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 14 | 0 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = """cuda""" if torch.cuda.is_available() else """cpu"""
def generate_summaries_or_translations( examples: List[str] , out_file: str , model_name: str , batch_size: int = 8 , device: str = DEFAULT_DEVICE , fp16=False , task="summarization" , prefix=None , **generate_kwargs , ) -> Dict:
    fout = Path(out_file ).open("w" , encoding="utf-8" )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors="pt" , truncation=True , padding="longest" ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + "\n" )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )
def run_generate(verbose=True ):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("input_path" , type=str , help="like cnn_dm/test.source" )
    parser.add_argument("save_path" , type=str , help="where to save summaries" )
    parser.add_argument("--reference_path" , type=str , required=False , help="like cnn_dm/test.target" )
    parser.add_argument("--score_path" , type=str , required=False , default="metrics.json" , help="where to save metrics" )
    parser.add_argument("--device" , type=str , required=False , default=DEFAULT_DEVICE , help="cuda, cuda:1, cpu etc." )
    parser.add_argument(
        "--prefix" , type=str , required=False , default=None , help="will be added to the begininng of src examples" )
    parser.add_argument("--task" , type=str , default="summarization" , help="used for task_specific_params + metrics" )
    parser.add_argument("--bs" , type=int , default=8 , required=False , help="batch size" )
    parser.add_argument(
        "--n_obs" , type=int , default=-1 , required=False , help="How many observations. Defaults to all." )
    parser.add_argument("--fp16" , action="store_true" )
    parser.add_argument("--dump-args" , action="store_true" , help="print the custom hparams with the results" )
    parser.add_argument(
        "--info" , nargs="?" , type=str , const=datetime_now() , help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu" )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fp16=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , "w" ) )
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
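    # Usage for summarization (flags as defined in run_generate above; paths illustrative):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 16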
run_generate(verbose=True)
| 712 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""" )
def main() -> None:
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
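# Worked example: feeding in the ciphertext "KHOOR" makes decrypt() print all 26
# candidate shifts; the line for key 3 reads "Decryption using Key #3: HELLO",
# exposing the original plaintext of this Caesar cipher.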
| 14 | 0 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    '''simple docstring'''
    def __init__( self , group: int = 14 ) -> None:
        '''simple docstring'''
        if group not in primes:
            raise ValueError("Unsupported Group" )
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self ) -> str:
        '''simple docstring'''
        return hex(self.__private_key )[2:]
    def generate_public_key( self ) -> str:
        '''simple docstring'''
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self , key: int ) -> bool:
        '''simple docstring'''
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self , other_key_str: str ) -> str:
        '''simple docstring'''
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("Invalid public key" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key_str: int , prime: int ) -> bool:
        '''simple docstring'''
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str: str , remote_public_key_str: str , group: int = 14 ) -> str:
        '''simple docstring'''
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("Invalid public key" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
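# Key-exchange sketch using the class above (method names follow the restored
# implementation; group 14, the 2048-bit MODP prime, is the default):
#
#   alice, bob = DiffieHellman(), DiffieHellman()
#   alice_secret = alice.generate_shared_key(bob.generate_public_key())
#   bob_secret = bob.generate_shared_key(alice.generate_public_key())
#   assert alice_secret == bob_secret   # both sides derive the same SHA-256 digest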
| 713 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "width_multiplier" ) )
class lowercase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Any=64 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Dict="swish" , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : int=32 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=10 , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=0.25 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Optional[int]=0.0 , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = conv_kernel_size
UpperCAmelCase_ = output_stride
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = width_multiplier
UpperCAmelCase_ = ffn_dropout
UpperCAmelCase_ = attn_dropout
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowercase__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaModelTester(self )
UpperCAmelCase_ = MobileViTVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ):
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_ = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MobileViTVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits.detach().cpu()
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
UpperCAmelCase_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
UpperCAmelCase_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 14 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64" , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float" , [dim] )
        vb = tf.placeholder("float" , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float" , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
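# Usage sketch (TF1-style API -- tf.Session/tf.placeholder -- with illustrative data):
#
#   from numpy import array
#   points = [array([1.0, 1.0]), array([1.5, 2.0]), array([8.0, 8.0]), array([9.0, 8.5])]
#   centroids, assignments = TFKMeansCluster(points, 2)
#
# With two well-separated blobs like these, the centroids converge near
# (1.25, 1.5) and (8.5, 8.25) and `assignments` splits the points 2/2.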
| 714 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left , right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
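# Quick sanity check for the pair above: iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
# returns [1, 2, 5, 7, 7, 8, 9]. The run width p doubles each pass (2, 4, 8, ...),
# and `merge` stitches each adjacent pair of already-sorted runs back in place.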
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(""",""")]
    print(iter_merge_sort(unsorted))
| 14 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCAmelCase : Union[str, Any] = None , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : Tuple = None , _UpperCAmelCase : Any = None , _UpperCAmelCase : Dict = False , _UpperCAmelCase : str = False , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = path_or_paths
UpperCAmelCase_ = split if split or isinstance(_UpperCAmelCase , _UpperCAmelCase ) else "train"
UpperCAmelCase_ = features
UpperCAmelCase_ = cache_dir
UpperCAmelCase_ = keep_in_memory
UpperCAmelCase_ = streaming
UpperCAmelCase_ = num_proc
UpperCAmelCase_ = kwargs
@abstractmethod
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Tuple = None , _UpperCAmelCase : Union[str, Any] = None , _UpperCAmelCase : Union[str, Any] = False , _UpperCAmelCase : Dict = False , _UpperCAmelCase : List[str] = None , **_UpperCAmelCase : str , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = features
UpperCAmelCase_ = cache_dir
UpperCAmelCase_ = keep_in_memory
UpperCAmelCase_ = streaming
UpperCAmelCase_ = num_proc
UpperCAmelCase_ = kwargs
@abstractmethod
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
pass
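# A concrete reader subclasses one of the bases above and fills in the abstract method
# (named `read` in the real `datasets` codebase; it appears under a mangled name in this
# dump), e.g. (sketch; the class name is illustrative):
#
#   class JsonDatasetReader(AbstractDatasetReader):
#       def read(self) -> Dataset:
#           ...  # build a Dataset/IterableDataset from self.path_or_paths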
| 715 |
"""simple docstring"""
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
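# Example conversions using the table above:
#   energy_conversion("joule", "kilojoule", 1000)  -> 1.0
#   energy_conversion("kilowatthour", "joule", 1)  -> 3600000.0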
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
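# With the lazy structure above, importing this package stays cheap: the torch/TF/Flax
# modeling files are only imported when one of the exported names (e.g.
# EncoderDecoderModel) is actually accessed on the module, not at import time.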
| 716 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""" )
class lowercase__ ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        '''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
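# Minimal usage sketch (illustrative; the class keeps the name ``lowercase__`` in this
# dump, and the shapes assume the 224-pixel defaults above):
#
#     import numpy as np
#     processor = lowercase__()
#     video = [np.zeros((256, 256, 3), dtype=np.uint8)] * 8  # 8 dummy frames
#     batch = processor.preprocess(video, return_tensors="np")
#     # batch["pixel_values"] is shaped (1, 8, 3, 224, 224): one video, eight frames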
| 14 | 0 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config ):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    '''simple docstring'''
    def check_output( self , want , got , optionflags ):
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
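# Example of the custom doctest flag registered above (illustrative; ``some_function``
# is a placeholder):
#
#     >>> some_function()  # doctest: +IGNORE_RESULT
#
# With the flag set, CustomOutputChecker reports a match regardless of the output.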
| 717 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def a__ ( matrix ):
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError("This matrix has no inverse." )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 14 | 0 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        '''simple docstring'''
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 718 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def a__ ( grid , source , destination , allow_diagonal , ):
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist , (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
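# Worked example (illustrative): on a 3x3 grid of ones with source (0, 0), destination
# (2, 2) and allow_diagonal=False, the function returns distance 4.0 together with a
# five-cell path such as [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2)].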
| 14 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args ):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand ( BaseTransformersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand( parser : ArgumentParser ):
        '''simple docstring'''
        download_parser = parser.add_parser("download" )
        download_parser.add_argument(
            "--cache-dir" , type=str , default=None , help="Path to location to store the models" )
        download_parser.add_argument(
            "--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
        download_parser.add_argument(
            "--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
        download_parser.add_argument("model" , type=str , help="Name of the model to download" )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model : str , cache : str , force : bool , trust_remote_code : bool ):
        '''simple docstring'''
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):
        '''simple docstring'''
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
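# Illustrative invocation (an assumption based on the parser defined above; the
# ``transformers-cli`` entry point itself is not shown in this file):
#
#     transformers-cli download --cache-dir /tmp/models bert-base-uncased
#
# which resolves to DownloadCommand and pre-fetches both the model weights and the
# tokenizer into the given cache directory.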
| 719 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance( x , y , max_step ):
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    img = Image.new("RGB" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
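# Quick sanity check (illustrative): get_distance(0, 0, 50) never diverges, so the loop
# runs to completion and returns 49 / 49 == 1.0; a point far outside the set such as
# get_distance(4, 4, 50) breaks on the first step and returns 0.0.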
| 14 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = """▁"""
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BigBirdTokenizer
UpperCamelCase = BigBirdTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer( self ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
    def test_tokenization_base_easy_symbols( self ):
        '''simple docstring'''
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        '''simple docstring'''
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        '''simple docstring'''
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="original_full" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens( self ):
        '''simple docstring'''
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swinv2"""] = [
        """SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Swinv2ForImageClassification""",
        """Swinv2ForMaskedImageModeling""",
        """Swinv2Model""",
        """Swinv2PreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 0 |
"""simple docstring"""
def valid_connection( graph , next_ver , curr_ind , path ):
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle( graph , path , curr_ind ):
    # Base Case
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def a__ ( graph , start_index = 0 ):
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph , path , 1 ) else []
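# Worked example (illustrative, not part of the original file): for the 4-cycle
# adjacency matrix
#     graph = [[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]
# a__(graph) returns [0, 1, 2, 3, 0], while a graph with no Hamiltonian cycle yields [].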
| 721 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime( number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums( n ):
    if not isinstance(n , int ):
        raise ValueError("n must be an integer" )
    if n <= 0:
        raise ValueError("n must be >= 0" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution():
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 14 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention( idx , cnt ):
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token( idx ):
    token = []
    token.append((f"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder ):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset" ) ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
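# Illustrative invocation (the script filename, model name and paths are placeholders,
# not from the original file):
#
#     python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-13-384x384-IN-22k.pth \
#         --pytorch_dump_folder_path ./cvt-13-384-22k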
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowercase__ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''convbert'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class lowercase__ ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
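# Illustrative check (not part of the original file): for the default task, the
# ``inputs`` property above evaluates to
#     OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                  ("attention_mask", {0: "batch", 1: "sequence"}),
#                  ("token_type_ids", {0: "batch", 1: "sequence"})])
# which is how the ONNX export marks batch and sequence dimensions as dynamic.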
| 14 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester :
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ):
        '''simple docstring'''
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        '''simple docstring'''
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ = BeitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = BeitForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowercase__ ( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = BeitForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common ( self : Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    all_model_classes = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = BeitModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
    def test_config ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
    def test_inputs_embeds ( self : Any ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def test_multi_gpu_data_parallel_forward ( self : Dict ) -> str:
'''simple docstring'''
pass
    def test_model_common_attributes ( self : Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
    def test_forward_signature ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
    def test_model ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
    def test_for_masked_lm ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
    def test_for_image_classification ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
    def test_for_semantic_segmentation ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
    def test_training ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
    def test_training_gradient_checkpointing ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ = False
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
    def test_initialization ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
    def test_model_from_pretrained ( self : Dict ) -> List[Any]:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = BeitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img ( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
    def default_image_processor ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head ( self : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).pixel_values.to(_UpperCAmelCase )
# prepare bool_masked_pos
UpperCAmelCase_ = torch.ones((1, 196) , dtype=torch.bool ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(pixel_values=_UpperCAmelCase , bool_masked_pos=_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _UpperCAmelCase , atol=1e-2 ) )
@slow
    def test_inference_image_classification_head_imagenet_1k ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
UpperCAmelCase_ = 281
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
    def test_inference_image_classification_head_imagenet_22k ( self : Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21841) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
UpperCAmelCase_ = 2396
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
    def test_inference_semantic_segmentation ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = BeitImageProcessor(do_resize=_UpperCAmelCase , size=640 , do_center_crop=_UpperCAmelCase )
UpperCAmelCase_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase_ = Image.open(ds[0]["file"] )
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=_UpperCAmelCase , )
else:
UpperCAmelCase_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
    def test_post_processing_semantic_segmentation ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = BeitImageProcessor(do_resize=_UpperCAmelCase , size=640 , do_center_crop=_UpperCAmelCase )
UpperCAmelCase_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase_ = Image.open(ds[0]["file"] )
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits.detach().cpu()
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(500, 300)] )
UpperCAmelCase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
UpperCAmelCase_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
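# Standalone usage sketch (an illustrative addition, not part of the original test
# file): the post-processing flow that test_post_processing_semantic_segmentation
# exercises above, condensed into one snippet. The checkpoint name and target size
# mirror the test fixtures; everything else here is an assumption.
if __name__ == "__main__":
    sketch_model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
    sketch_processor = BeitImageProcessor(do_resize=True , size=640 , do_center_crop=False )
    sketch_inputs = sketch_processor(images=prepare_img() , return_tensors="pt" )
    with torch.no_grad():
        sketch_outputs = sketch_model(**sketch_inputs )
    # one (height, width) label map per image, resized to the requested target size
    sketch_maps = sketch_processor.post_process_semantic_segmentation(outputs=sketch_outputs , target_sizes=[(500, 300)] )
    print(sketch_maps[0].shape )  # torch.Size([500, 300])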
| 701 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''mobilenet_v1'''
    def __init__( self : Tuple , num_channels : int=3 , image_size : Union[str, Any]=224 , depth_multiplier : Any=1.0 , min_depth : Any=8 , hidden_act : List[Any]="relu6" , tf_padding : List[str]=True , classifier_dropout_prob : Dict=0.999 , initializer_range : List[Any]=0.02 , layer_norm_eps : List[Any]=0.001 , **kwargs : str , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig ( OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
    def outputs ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
    def atol_for_validation ( self : Tuple ) -> float:
'''simple docstring'''
return 1e-4
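# Usage sketch (an illustrative addition, not part of the original module): building
# the config and inspecting the ONNX axis mappings defined above. The depth_multiplier
# value and the task string are arbitrary examples.
if __name__ == "__main__":
    demo_config = MobileNetV1Config(depth_multiplier=0.75 )
    demo_onnx_config = MobileNetV1OnnxConfig(demo_config , task="image-classification" )
    print(demo_onnx_config.inputs )   # OrderedDict([('pixel_values', {0: 'batch'})])
    print(demo_onnx_config.outputs )  # OrderedDict([('logits', {0: 'batch'})])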
| 14 | 0 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort ( a , start , end ):
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
return count
def _in_place_partition ( a , start , end ):
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]: # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100 # 100 elements are to be sorted
mu , sigma = 0, 1 # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
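# Deterministic sanity check (an illustrative addition): the sort is in-place, so a
# small array should come back ordered; only the comparison count varies with the
# random pivots. The sample values are arbitrary.
sample = np.array([5, 3, 8, 1, 9, 2])
_in_place_quick_sort(sample, 0, len(sample) - 1)
assert list(sample) == [1, 2, 3, 5, 8, 9]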
| 702 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
elif weight_type == "running_mean":
UpperCAmelCase_ = value
elif weight_type == "running_var":
UpperCAmelCase_ = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase_ = value
elif weight_type == "inv_freq":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights ( fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path , hidden_act="swish" )
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.eos_token_id = target_dict.bos_index
            config.pad_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = Wav2Vec2ConformerForCTC(config )
    else:
        hf_wavavec = Wav2Vec2ConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
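# Illustrative walk-through (an addition, not part of the conversion script): how the
# wildcard replacement inside recursively_load_weights resolves a layer index. The
# sample key and the helper below are made up for demonstration.
def _demo_map_key(name):
    key = "ffn1.w_1"  # one fairseq-side entry of MAPPING
    mapped_key = "encoder.layers.*.ffn1.intermediate_dense"
    layer_index = name.split(key)[0].split(".")[-2]  # "3" for the sample key
    return "wav2vec2_conformer." + mapped_key.replace("*", layer_index)

assert _demo_map_key("w2v_model.encoder.layers.3.ffn1.w_1.weight") == (
    "wav2vec2_conformer.encoder.layers.3.ffn1.intermediate_dense"
)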
| 14 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main ( ):
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def generate_key ( key_size ):
    print("Generating prime p..." )
    p = rabinMiller.generate_large_prime(key_size )
    print("Generating prime q..." )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print("Calculating d that is mod inverse of e..." )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
return (public_key, private_key)
def make_key_files ( name , key_size ):
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
    public_key , private_key = generate_key(key_size )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
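# Sanity sketch (an illustrative addition): keys from generate_key satisfy the RSA
# round trip m == (m**e)**d mod n. The 64-bit size only keeps the check fast; real
# keys should stay at 1024 bits or more. Call _demo_round_trip() to verify.
def _demo_round_trip():
    public_key, private_key = generate_key(64)
    n, e = public_key
    _, d = private_key
    message = 42  # any integer smaller than n
    assert pow(pow(message, e, n), d, n) == message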
| 703 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort ( my_list ):
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
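# Worked example (an illustrative addition): for [0.4, 0.3, 2.2, 1.1] the value range
# is [0.3, 2.2], so int(2.2 - 0.3) + 1 == 2 buckets are created and element i lands
# in bucket int(i - 0.3); floats therefore sort correctly too.
assert bucket_sort([0.4, 0.3, 2.2, 1.1]) == [0.3, 0.4, 1.1, 2.2]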
| 14 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("""Enter image url: """).strip()
    print(F"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, """html.parser""")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
    image_data = requests.get(image_url).content
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, """wb""") as fp:
        fp.write(image_data)
    print(F"Done. Image saved to disk as {file_name}.")
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""PerceiverFeatureExtractor"""]
lowerCamelCase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_perceiver"""] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
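# Illustrative note (an addition, not part of the original module): with the lazy
# structure above, a symbol is resolved only when first accessed, so importing the
# config class stays cheap while the modeling class pulls in torch on demand. The
# dotted path below is the file's conventional location and is an assumption here.
#
#     from transformers.models.perceiver import PerceiverConfig  # cheap
#     from transformers.models.perceiver import PerceiverModel   # loads modeling_perceiver lazily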
| 14 | 0 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort ( my_list ):
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 705 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap ( checkpoint_path , enable_fusion=False ):
    model , model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def rename_state_dict ( state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
return model_state_dict
def convert_clap_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
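# Illustrative walk-through (an addition, not part of the conversion script): how
# rename_state_dict splits a fused qkv tensor into three equal slices. The shape
# below is made up for demonstration.
def _demo_split_qkv():
    mixed_qkv = torch.arange(6.0).reshape(6, 1)  # pretend (3 * qkv_dim, hidden)
    qkv_dim = mixed_qkv.size(0) // 3
    query_layer = mixed_qkv[:qkv_dim]
    key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
    value_layer = mixed_qkv[qkv_dim * 2 :]
    assert query_layer.shape == key_layer.shape == value_layer.shape == (2, 1)

_demo_split_qkv()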
| 14 | 0 |
"""simple docstring"""
def bin_to_octal ( bin_string ):
    if not all(char in "01" for char in bin_string ):
        raise ValueError("Non-binary value was passed to the function" )
    if not bin_string:
        raise ValueError("Empty string was passed to the function" )
    oct_string = ""
    while len(bin_string ) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
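# Worked example (an illustrative addition): "111100" splits into ["111", "100"],
# and each group maps to one octal digit (0b111 -> 7, 0b100 -> 4).
assert bin_to_octal("111100") == "74"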
| 706 |
"""simple docstring"""
def is_palindrome ( head ):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
return True
def is_palindrome_stack ( head ):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
return True
def is_palindrome_dict ( head ):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
return True
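# Usage sketch (an illustrative addition): a throwaway node class, since the
# functions above assume a singly linked list exposing .val and .next. Note that
# is_palindrome mutates its input (it splits and reverses the second half), so
# build a fresh list per call.
class _DemoNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

assert is_palindrome_dict(_DemoNode(1, _DemoNode(2, _DemoNode(2, _DemoNode(1))))) is True
assert is_palindrome_dict(_DemoNode(1, _DemoNode(2, _DemoNode(3)))) is False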
| 14 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys ( state_dict , encoder_only=False ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head" ):
            key = "segformer.encoder." + key
        if key.startswith("backbone" ):
            key = key.replace("backbone" , "segformer.encoder" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(idx )-1}""" )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
            key = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(idx )-1}""" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f"""block{idx}""" , f"""block.{int(idx )-1}""" )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(idx )-1}""" )
        if key.startswith("head" ):
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
return new_state_dict
def read_in_k_v ( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img ( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path ):
UpperCAmelCase_ = SegformerConfig()
UpperCAmelCase_ = False
# set attributes based on model_name
UpperCAmelCase_ = "huggingface/label-files"
if "segformer" in model_name:
UpperCAmelCase_ = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
UpperCAmelCase_ = 150
UpperCAmelCase_ = "ade20k-id2label.json"
UpperCAmelCase_ = (1, 150, 128, 128)
elif "city" in model_name:
UpperCAmelCase_ = 19
UpperCAmelCase_ = "cityscapes-id2label.json"
UpperCAmelCase_ = (1, 19, 128, 128)
else:
raise ValueError(f"""Model {model_name} not supported""" )
elif "mit" in model_name:
UpperCAmelCase_ = True
UpperCAmelCase_ = model_name[4:6]
UpperCAmelCase_ = 1000
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = (1, 1000)
else:
raise ValueError(f"""Model {model_name} not supported""" )
# set config attributes
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
UpperCAmelCase_ = [64, 128, 320, 512]
UpperCAmelCase_ = 256
elif size == "b2":
UpperCAmelCase_ = [64, 128, 320, 512]
UpperCAmelCase_ = 768
UpperCAmelCase_ = [3, 4, 6, 3]
elif size == "b3":
UpperCAmelCase_ = [64, 128, 320, 512]
UpperCAmelCase_ = 768
UpperCAmelCase_ = [3, 4, 18, 3]
elif size == "b4":
UpperCAmelCase_ = [64, 128, 320, 512]
UpperCAmelCase_ = 768
UpperCAmelCase_ = [3, 8, 27, 3]
elif size == "b5":
UpperCAmelCase_ = [64, 128, 320, 512]
UpperCAmelCase_ = 768
UpperCAmelCase_ = [3, 6, 40, 3]
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor (only resize + normalize)
UpperCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
# prepare image
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
UpperCAmelCase_ = torch.load(lowerCAmelCase__ , map_location=torch.device("cpu" ) )
else:
UpperCAmelCase_ = torch.load(lowerCAmelCase__ , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
UpperCAmelCase_ = rename_keys(lowerCAmelCase__ , encoder_only=lowerCAmelCase__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# create HuggingFace model and load state dict
if encoder_only:
UpperCAmelCase_ = False
UpperCAmelCase_ = SegformerForImageClassification(lowerCAmelCase__ )
else:
UpperCAmelCase_ = SegformerForSemanticSegmentation(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
# forward pass
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
UpperCAmelCase_ = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
UpperCAmelCase_ = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1e-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
image_processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
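# Example invocation (an illustrative addition; the checkpoint path is a placeholder
# and the script file name follows the usual convention for such converters, so both
# are assumptions — the flags themselves match the argparse definition above):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-converted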
| 707 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config ( model_name ):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config )
UpperCAmelCase_ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase_ = 847
UpperCAmelCase_ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCAmelCase_ = 150
UpperCAmelCase_ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase_ = 171
UpperCAmelCase_ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCAmelCase_ = 133
UpperCAmelCase_ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase_ = 19
UpperCAmelCase_ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase_ = 65
UpperCAmelCase_ = "mapillary-vistas-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
return config
def create_rename_keys ( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
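# The (old, new) pairs collected above are consumed by rename_key below: each parameter name
# from the original checkpoint on the left is moved to the matching 🤗 Transformers parameter
# name on the right.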
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim :, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
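# Shape sanity check for the fused-projection split above (illustrative values only): a fused
# qkv weight of shape (3 * dim, dim) yields three (dim, dim) blocks, rows [:dim] for query,
# [dim : 2 * dim] for key and [-dim:] for value; the (3 * dim,) bias is split the same way.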
def read_in_decoder_q_k_v( state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[: hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[: hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(f"""nielsr/{model_name}""" )
        image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
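# Illustrative invocation (the script file name and the local paths below are hypothetical;
# only --model_name has a default that matches a released checkpoint):
#   python convert_maskformer.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade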
| 14 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "vit_msn"

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
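# Minimal usage sketch (illustrative; assumes the class above is importable as shown, and the
# values are this file's defaults):
#   config = ViTMSNConfig(image_size=224 , patch_size=16 )
#   assert config.num_hidden_layers == 12 and config.hidden_size == 768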
| 708 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="base" , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 4 , _UpperCAmelCase )]
self.assertListEqual(_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="multi" , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 7 , _UpperCAmelCase )]
self.assertListEqual(
_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
    tgt_text = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
    expected_src_tokens = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        cls.pad_token_id = 1
return cls
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , _UpperCAmelCase )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
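# Both suites above can be run standalone with pytest; the path below assumes the usual
# transformers test layout and is illustrative only:
#   pytest tests/models/plbart/test_tokenization_plbart.py -rA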
| 14 | 0 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
lowerCamelCase = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
lowerCamelCase = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
lowerCamelCase = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_singletons_num = 0
    sys_singletons_num = 0
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"Number of resulting singleton clusters in the key "
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"files, respectively" )
return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , )
if conll_subparts_num == 3:
        conll = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({"conll_score": conll} )
return output_scores
def check_gold_parse_annotation( key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
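# Note: line.split()[5] is column 6 of the CoNLL format, the "parse bit" described in
# _DESCRIPTION above; any value other than "-" on a non-comment line marks gold parse
# annotation, which the 'min_span' mode requires.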
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        '''simple docstring'''
        metrics = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
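# Illustrative use through the datasets API (mirrors the doctest in _KWARGS_DESCRIPTION above;
# assumes the coval package from the codebase URL is installed):
#   coval = datasets.load_metric("coval")
#   results = coval.compute(predictions=[conll_lines] , references=[conll_lines] )
#   results["conll_score"]  # averaged F1 of MUC, B-cubed and CEAFe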
| 709 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCamelCase = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class OwlViTTextConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "owlvit_text_model"
    def __init__( self , vocab_size=49408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49406 , eos_token_id=49407 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "owlvit_vision_model"
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "owlvit"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
@classmethod
    def from_text_vision_configs( cls , text_config: Dict , vision_config: Dict , **kwargs ):
        '''simple docstring'''
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
class OwlViTOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
    def atol_for_validation( self ) -> float:
'''simple docstring'''
return 1e-4
    def generate_dummy_inputs( self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , framework: Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset( self ) -> int:
'''simple docstring'''
return 14
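# Composition sketch for the classes above (values are this file's defaults; building a config
# this way is an illustration, not part of the module):
#   text_config = OwlViTTextConfig(vocab_size=49408 , hidden_size=512 )
#   vision_config = OwlViTVisionConfig(hidden_size=768 , patch_size=32 )
#   config = OwlViTConfig.from_text_vision_configs(text_config.to_dict() , vision_config.to_dict() )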
| 14 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter( formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None , ):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter( unavailable_error: Exception , format_type: str , aliases: Optional[List[str]] = None ):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
lowerCamelCase = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
lowerCamelCase = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
lowerCamelCase = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias( format_type ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter( format_type , **format_kwargs ):
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
| 710 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class XLMProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "[PAD]"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCAmelCase ) , 1012 )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
    def big_tokenizer( self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [35389, 6672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
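        # fmt: off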
UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 14 | 0 |