"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
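# Usage sketch (added for illustration; not part of the original file). It
# assumes the released `transformers` package: the lazy structure above means
# the heavy submodule is only imported on first attribute access.
from transformers import ConvBertConfig

config = ConvBertConfig()  # configuration_convbert is imported only here
print(config.num_hidden_layers)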
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: run the wrapped benchmark callable eagerly or as a compiled tf.function."""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """Build a (batch_size, sequence_length) tensor of random token ids in [0, vocab_size)."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run a few extra passes first to stabilize compilation for TPU/XLA
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A_ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
A_ = parser.parse_args()
if args.model_type == "bert":
A_ = BertForMaskedLM.from_pretrained(args.model_name)
A_ = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
A_ = model.state_dict()
A_ = {}
for w in ["word_embeddings", "position_embeddings"]:
A_ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
A_ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
A_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
A_ = state_dict['''cls.predictions.decoder.weight''']
A_ = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
A_ = state_dict[F'''cls.predictions.transform.dense.{w}''']
A_ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
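# Sanity-check sketch (added for illustration): reload the dumped student
# state dict and count its tensors; the path is just the script's default.
import torch

student_sd = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth", map_location="cpu")
print(f"{len(student_sd)} tensors transferred to the student")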
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = int(snake_case__ )
# Initialize Result
_snake_case : str = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
answer.append(snake_case__ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
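# Worked example (added for illustration) of the greedy loop above, using the
# Indian-currency defaults from the driver:
# 987 = 500 + 4 * 100 + 50 + 20 + 10 + 5 + 2
print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]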
"""simple docstring"""
from timeit import timeit
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if number < 0:
raise ValueError("""the value of input must not be negative""" )
_snake_case : int = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if number < 0:
raise ValueError("""the value of input must not be negative""" )
_snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__ ():
"""simple docstring"""
def do_benchmark(snake_case__ : int ) -> None:
_snake_case : int = """import __main__ as z"""
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(snake_case__ ) = }" )
_snake_case : Any = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=snake_case__ )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(snake_case__ ) = }" )
_snake_case : Optional[int] = timeit(
"""z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" , setup=snake_case__ , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(snake_case__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
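# Worked example (added for illustration): 25 is 0b11001, so both counters
# must report three set bits; 0 has none.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3
assert get_set_bits_count_using_modulo_operator(0) == 0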
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
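# Inference sketch (added for illustration) mirroring the classification
# integration test above; it assumes the released `transformers` API and the
# same public checkpoint named in the test.
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # the test expects class id 281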
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : List[Any] = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
_snake_case : Dict = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" )
_snake_case : List[str] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" )
_snake_case : List[str] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" )
_snake_case : Union[str, Any] = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" )
_snake_case : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" )
_snake_case : List[str] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" )
_snake_case : Optional[int] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" )
_snake_case : str = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" )
_snake_case : str = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" )
_snake_case : str = key.replace("""image_encoder.module""" , """flava.image_model""" )
_snake_case : Dict = key.replace("""text_encoder.module""" , """flava.text_model""" )
_snake_case : List[str] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" )
_snake_case : Any = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" )
_snake_case : List[Any] = key.replace("""text_projection""" , """flava.text_projection""" )
_snake_case : Dict = key.replace("""image_projection""" , """flava.image_projection""" )
_snake_case : Tuple = value.float()
for key, value in codebook_state_dict.items():
_snake_case : Optional[Any] = value
return upgrade
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any]=None ):
"""simple docstring"""
if config_path is not None:
_snake_case : str = FlavaConfig.from_pretrained(snake_case__ )
else:
_snake_case : int = FlavaConfig()
_snake_case : int = FlavaForPreTraining(snake_case__ ).eval()
_snake_case : Tuple = convert_dalle_checkpoint(snake_case__ , snake_case__ , save_checkpoint=snake_case__ )
if os.path.exists(snake_case__ ):
_snake_case : int = torch.load(snake_case__ , map_location="""cpu""" )
else:
_snake_case : List[Any] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )
_snake_case : Any = upgrade_state_dict(snake_case__ , snake_case__ )
hf_model.load_state_dict(snake_case__ )
_snake_case : List[Any] = hf_model.state_dict()
_snake_case : Union[str, Any] = count_parameters(snake_case__ )
_snake_case : Optional[Any] = count_parameters(snake_case__ ) + count_parameters(snake_case__ )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
hf_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
A_ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
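# Mini demonstration (added for illustration) of the key-renaming pass, using
# a hypothetical teacher key; only the renaming logic itself comes from above.
import torch

demo_sd = {"text_encoder.module.layer.0.weight": torch.zeros(2)}
print(list(upgrade_state_dict(demo_sd, {}).keys()))
# -> ['flava.text_model.layer.0.weight']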
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
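# Usage sketch (added for illustration) of the scheduler under test; assumes
# the public `diffusers` API (`set_timesteps` + `step`) and fakes the model
# output with a constant multiple of the sample, like the tests above do.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = 0.1 * sample  # stand-in for a denoising network prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)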
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
_import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
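# Zero-shot detection sketch (added for illustration); assumes the released
# `google/owlvit-base-patch32` checkpoint and the public processor API.
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))  # placeholder image; use a real photo
inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape)  # per-query detection logits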
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_snake_case : Any = []
for num in range(len(snake_case__ ) ):
_snake_case : Optional[int] = 0
while 2 * i * i <= odd_composites[num]:
_snake_case : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(snake_case__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(snake_case__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
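# Worked check (added for illustration): 33 = 31 + 2 * 1**2, so 33 satisfies
# the conjecture and is not a counterexample; the known answer to the
# underlying Project Euler problem 46 is 5777.
assert is_prime(33 - 2 * 1 * 1)  # 31 is prime
assert solution() == 5777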
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
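# Round-trip sketch (added for illustration) of the reader/writer under test;
# assumes the public `datasets` API, with an illustrative local path.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ParquetDatasetWriter(ds, "demo.parquet").write()
reloaded = ParquetDatasetReader("demo.parquet").read()
print(reloaded.column_names)  # ['col_1', 'col_2']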
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """CLIP-style preprocessing implemented with torchvision transforms so that
    gradients can flow through the image-processing step (the stock processor
    converts to PIL images, which breaks backprop)."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_snake_case : Dict = 0
while number:
# Brian Kernighan's trick: `number &= number - 1` clears the lowest set bit,
# so the loop runs once per set bit rather than once per bit position
# (i.e. popcount(number) iterations instead of up to 32)
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
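# A quick illustrative check (values chosen here, not part of the original file):
# 13 == 0b1101 has three set bits, so the loop above runs exactly three times.
#
#   assert UpperCAmelCase__(0b1101) == 3
#   assert UpperCAmelCase__(0) == 0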
| 28 | 0 |
"""simple docstring"""
from collections import deque
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Dict = len(snake_case__ )
_snake_case : List[Any] = deque()
_snake_case : List[str] = [False for _ in range(snake_case__ )]
_snake_case : List[str] = [-1 for _ in range(snake_case__ )]
_snake_case : Optional[int] = index_of[:]
def strong_connect(snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
_snake_case : Dict = index # the number when this node is seen
_snake_case : Tuple = index # lowest rank node reachable from here
index += 1
stack.append(snake_case__ )
_snake_case : Tuple = True
for w in g[v]:
if index_of[w] == -1:
_snake_case : int = strong_connect(snake_case__ , snake_case__ , snake_case__ )
_snake_case : Dict = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
_snake_case : Dict = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
_snake_case : int = []
_snake_case : List[str] = stack.pop()
_snake_case : int = False
component.append(snake_case__ )
while w != v:
_snake_case : Optional[int] = stack.pop()
_snake_case : str = False
component.append(snake_case__ )
components.append(snake_case__ )
return index
_snake_case : Any = []
for v in range(snake_case__ ):
if index_of[v] == -1:
strong_connect(snake_case__ , 0 , snake_case__ )
return components
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : Dict = [[] for _ in range(snake_case__ )]
for u, v in edges:
g[u].append(snake_case__ )
return g
if __name__ == "__main__":
# Test
A_ = 7
A_ = [0, 0, 1, 2, 3, 3, 4, 4, 6]
A_ = [1, 3, 2, 0, 1, 4, 5, 6, 5]
A_ = [(u, v) for u, v in zip(source, target)]
A_ = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
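# One more illustrative case (constructed here, not from the original test): a
# two-node cycle collapses into a single strongly connected component, using the
# same module-level names as the test above.
#
#   g2 = create_graph(2, [(0, 1), (1, 0)])
#   assert tarjan(g2) == [[1, 0]]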
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
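# A minimal backbone sketch (hypothetical shapes, mirroring the backbone checks
# above): out_features selects which stages are exposed as feature maps.
#
#   config = ConvNextVaConfig(out_features=["stage2", "stage3"])
#   backbone = ConvNextVaBackbone(config).eval()
#   feats = backbone(torch.randn(1, 3, 32, 32)).feature_maps  # one map per stage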
| 28 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A_ = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
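# A minimal round-trip sketch (file name chosen here, mirroring the fixtures
# above): write a small Dataset to Parquet and read it back via the same classes.
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#   ParquetDatasetWriter(ds, "roundtrip.parquet").write()
#   reloaded = ParquetDatasetReader("roundtrip.parquet").read()
#   assert reloaded.column_names == ["col_1", "col_2", "col_3"]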
| 28 | 0 |
"""simple docstring"""
from collections.abc import Callable
def UpperCAmelCase__ (snake_case__ : Callable[[float], float] , snake_case__ : float , snake_case__ : float ):
"""simple docstring"""
_snake_case : float = a
_snake_case : float = b
if function(snake_case__ ) == 0: # one of the endpoints a or b is already a root
return a
elif function(snake_case__ ) == 0:
return b
elif (
function(snake_case__ ) * function(snake_case__ ) > 0
): # if f(a) and f(b) share the same sign, no sign change is guaranteed
# inside [a, b], so bisection cannot locate a root
raise ValueError("""could not find root in given interval.""" )
else:
_snake_case : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # iterate until the bracket half-width drops below 10^-7
if function(snake_case__ ) == 0:
return mid
elif function(snake_case__ ) * function(snake_case__ ) < 0:
_snake_case : Tuple = mid
else:
_snake_case : List[str] = mid
_snake_case : Any = start + (end - start) / 2.0
return mid
def UpperCAmelCase__ (snake_case__ : float ):
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
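# An extra worked example (chosen here, not part of the original file): bisecting
# g(x) = x**2 - 2 on [1, 2] converges to sqrt(2) within the 1e-7 tolerance above.
#
#   root = bisection(lambda x: x**2 - 2, 1, 2)  # name as used in __main__ above
#   assert abs(root - 2 ** 0.5) < 1e-6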
| 716 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
| 28 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
assert x is not None
assert y is not None
_snake_case : List[str] = len(snake_case__ )
_snake_case : Any = len(snake_case__ )
# declaring the array for storing the dp values
_snake_case : int = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
_snake_case : int = 1 if x[i - 1] == y[j - 1] else 0
_snake_case : List[Any] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_snake_case : str = """"""
_snake_case , _snake_case : Any = m, n
while i > 0 and j > 0:
_snake_case : List[Any] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_snake_case : List[str] = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
A_ = '''AGGTAB'''
A_ = '''GXTXAYB'''
A_ = 4
A_ = '''GTAB'''
A_ , A_ = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
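# A second illustrative pair (chosen here): the DP table above costs O(m * n) time
# and space, and the backtracking loop reconstructs one optimal subsequence.
#
#   ln2, seq2 = longest_common_subsequence("ABC", "AC")  # name as used in __main__
#   assert (ln2, seq2) == (2, "AC")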
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ = [ord(letter) for letter in string.ascii_lowercase]
A_ = {ord(char) for char in VALID_CHARS}
A_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ):
"""simple docstring"""
_snake_case : str = ""
_snake_case : int
_snake_case : int
_snake_case : int
for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ):
_snake_case : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case__ )
return decoded
def UpperCAmelCase__ (snake_case__ : list[int] ):
"""simple docstring"""
_snake_case : list[str] = []
for key in product(snake_case__ , repeat=3 ):
_snake_case : List[Any] = try_key(snake_case__ , snake_case__ )
if encoded is not None:
possibles.append(snake_case__ )
return possibles
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
_snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )]
_snake_case : Optional[Any] = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_snake_case : Optional[int] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
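# A tiny sanity sketch (ciphertext constructed here, not the project data): XOR
# with a cycled 3-byte key round-trips through the key-trial decoder above.
#
#   key = (ord("a"), ord("b"), ord("c"))
#   cipher = [ord(c) ^ k for c, k in zip("the cat", cycle(key))]
#   assert try_key(cipher, key) == "the cat"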
| 28 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowercase( unittest.TestCase ):
'''simple docstring'''
def __init__( self: Union[str, Any], a_: int, a_: Optional[int]=7, a_: Any=3, a_: Any=18, a_: Union[str, Any]=30, a_: Union[str, Any]=400, a_: Any=True, a_: Tuple=None, a_: Tuple=True, ):
'''simple docstring'''
_snake_case : Tuple = size if size is not None else {"""height""": 18, """width""": 18}
_snake_case : Dict = parent
_snake_case : int = batch_size
_snake_case : Tuple = num_channels
_snake_case : Optional[int] = image_size
_snake_case : Dict = min_resolution
_snake_case : Optional[int] = max_resolution
_snake_case : Any = do_resize
_snake_case : Optional[int] = size
_snake_case : Tuple = do_normalize
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ImageGPTImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : str = ImageGPTImageProcessingTester(self )
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_, """clusters""" ) )
self.assertTrue(hasattr(a_, """do_resize""" ) )
self.assertTrue(hasattr(a_, """size""" ) )
self.assertTrue(hasattr(a_, """do_normalize""" ) )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"""height""": 18, """width""": 18} )
_snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42} )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
_snake_case : str = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(a_, obj[key] ) )
else:
self.assertEqual(obj[key], a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : List[str] = os.path.join(a_, """image_processor.json""" )
image_processor_first.to_json_file(a_ )
_snake_case : Tuple = self.image_processing_class.from_json_file(a_ ).to_dict()
_snake_case : List[str] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(a_, image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key], a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(a_ )
_snake_case : Any = self.image_processing_class.from_pretrained(a_ ).to_dict()
_snake_case : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(a_, image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key], a_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
_snake_case : Dict = Image.open(dataset[4]["""file"""] )
_snake_case : Union[str, Any] = Image.open(dataset[5]["""file"""] )
_snake_case : Optional[Any] = [imagea, imagea]
return images
@require_vision
@require_torch
class lowercase( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : str = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
_snake_case : List[str] = prepare_images()
# test non-batched
_snake_case : Tuple = image_processing(images[0], return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids, torch.LongTensor )
self.assertEqual(encoding.input_ids.shape, (1, 1_024) )
_snake_case : Any = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist(), a_ )
# test batched
_snake_case : Union[str, Any] = image_processing(a_, return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids, torch.LongTensor )
self.assertEqual(encoding.input_ids.shape, (2, 1_024) )
_snake_case : Tuple = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist(), a_ )
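# Descriptive note: ImageGPT's processor produces no dense pixel tensor; pixels
# are colour-quantized against `clusters`, so `input_ids` holds one token id per
# down-sampled pixel, which is what the shape and value assertions above check.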
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "feature_extractor"]
lowercase__ = "TvltImageProcessor"
lowercase__ = "TvltFeatureExtractor"
def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=a_, feature_extractor=a_ )
_snake_case : Any = image_processor
_snake_case : Dict = feature_extractor
def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_snake_case : Optional[int] = None
if images is not None:
_snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ )
if images_mixed is not None:
_snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ )
if audio is not None:
_snake_case : Any = self.feature_extractor(
a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ )
_snake_case : List[str] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
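# A minimal usage sketch (checkpoint id assumed, not taken from this file): one
# call routes `images` to the image processor and `audio` to the feature
# extractor, then merges the resulting dicts as implemented above.
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")  # assumed id
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44_100)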
| 28 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = []
def UpperCamelCase_ ( self: List[Any], a_: Any ):
'''simple docstring'''
return self.node_position[vertex]
def UpperCamelCase_ ( self: Any, a_: List[Any], a_: str ):
'''simple docstring'''
_snake_case : Optional[Any] = pos
def UpperCamelCase_ ( self: List[Any], a_: int, a_: Dict, a_: int, a_: Optional[Any] ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_snake_case : Dict = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_snake_case : int = 2 * start + 1
else:
_snake_case : Optional[Any] = 2 * start + 2
if heap[smallest_child] < heap[start]:
_snake_case , _snake_case : Any = heap[smallest_child], positions[smallest_child]
_snake_case , _snake_case : Tuple = (
heap[start],
positions[start],
)
_snake_case , _snake_case : Optional[Any] = temp, tempa
_snake_case : Dict = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child], self.get_position(positions[start] ) )
self.set_position(positions[start], a_ )
self.top_to_bottom(a_, a_, a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: Dict ):
'''simple docstring'''
_snake_case : str = position[index]
while index != 0:
_snake_case : Tuple = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_snake_case : Union[str, Any] = heap[parent]
_snake_case : int = position[parent]
self.set_position(position[parent], a_ )
else:
_snake_case : List[Any] = val
_snake_case : Any = temp
self.set_position(a_, a_ )
break
_snake_case : int = parent
else:
_snake_case : Tuple = val
_snake_case : List[Any] = temp
self.set_position(a_, 0 )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[int], a_: int ):
'''simple docstring'''
_snake_case : Any = len(a_ ) // 2 - 1
for i in range(a_, -1, -1 ):
self.top_to_bottom(a_, a_, len(a_ ), a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: List[str], a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = positions[0]
_snake_case : str = sys.maxsize
self.top_to_bottom(a_, 0, len(a_ ), a_ )
return temp
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[Any] = Heap()
_snake_case : Optional[Any] = [0] * len(snake_case__ )
_snake_case : Dict = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_snake_case : List[str] = [] # Heap of Distance of vertices from their neighboring vertex
_snake_case : Dict = []
for vertex in range(len(snake_case__ ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case__ )
heap.node_position.append(snake_case__ )
_snake_case : Any = []
_snake_case : Tuple = 1
_snake_case : List[str] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_snake_case : List[Any] = 0
_snake_case : Dict = distance
heap.heapify(snake_case__ , snake_case__ )
for _ in range(1 , len(snake_case__ ) ):
_snake_case : Dict = heap.delete_minimum(snake_case__ , snake_case__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_snake_case : List[Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case__ )]
):
_snake_case : Any = distance
heap.bottom_to_top(
snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ )
_snake_case : Tuple = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A_ = int(input('''Enter number of edges: ''').strip())
A_ = defaultdict(list)
for _ in range(edges_number):
A_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 719 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
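# Pick the tensor framework for `return_tensors` in these tests: torch if available, then TensorFlow, otherwise JAX.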
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
| 28 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
A_ = ''''''
A_ = ''''''
A_ = ''''''
A_ = 1 # (0 is vertical, 1 is horizontal)
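# Placeholder configuration: the three empty strings above are the label, image and output directory paths to set before running.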
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = get_dataset(snake_case__ , snake_case__ )
print("""Processing...""" )
_snake_case : List[Any] = update_image_and_anno(snake_case__ , snake_case__ , snake_case__ )
for index, image in enumerate(snake_case__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_snake_case : Optional[Any] = random_chars(32 )
_snake_case : Tuple = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_snake_case : Dict = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(F"{file_root}.jpg" , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(snake_case__ )} with {file_name}" )
_snake_case : Optional[int] = []
for anno in new_annos[index]:
_snake_case : str = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(snake_case__ )
        with open(F"{file_root}.txt" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
_snake_case : List[str] = []
for label_file in glob.glob(os.path.join(snake_case__ , """*.txt""" ) ):
_snake_case : int = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case__ ) as in_file:
_snake_case : Union[str, Any] = in_file.readlines()
_snake_case : int = os.path.join(snake_case__ , F"{label_name}.jpg" )
_snake_case : str = []
for obj_list in obj_lists:
_snake_case : Dict = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(snake_case__ )
labels.append(snake_case__ )
return img_paths, labels
def UpperCAmelCase__ (snake_case__ : list , snake_case__ : list , snake_case__ : int = 1 ):
"""simple docstring"""
_snake_case : Dict = []
_snake_case : Any = []
_snake_case : List[Any] = []
for idx in range(len(snake_case__ ) ):
_snake_case : Optional[int] = []
_snake_case : Any = img_list[idx]
path_list.append(snake_case__ )
_snake_case : Tuple = anno_list[idx]
_snake_case : Any = cva.imread(snake_case__ )
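        # Bounding boxes are stored with normalized centres, so a horizontal flip maps x_center -> 1 - x_center and a vertical flip maps y_center -> 1 - y_center.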
if flip_type == 1:
_snake_case : Optional[int] = cva.flip(snake_case__ , snake_case__ )
for bbox in img_annos:
_snake_case : List[str] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_snake_case : List[Any] = cva.flip(snake_case__ , snake_case__ )
for bbox in img_annos:
_snake_case : int = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(snake_case__ )
new_imgs_list.append(snake_case__ )
return new_imgs_list, new_annos_lists, path_list
def UpperCAmelCase__ (snake_case__ : int = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
_snake_case : Dict = ascii_lowercase + digits
return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
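# Abstract base class for CLI subcommands: subclasses register their arguments on a parser and implement a run method.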
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
| 28 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
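# A DisjunctiveConstraint is satisfied once generation has produced any one of the candidate token-id sequences.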
@require_torch
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
_snake_case : int = DisjunctiveConstraint(a_ )
self.assertTrue(isinstance(dc.token_ids, a_ ) )
with self.assertRaises(a_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(a_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = [[1, 2], [1, 2, 3, 4]]
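        # Expected to be rejected: one candidate may not be a strict prefix (complete subset) of another.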
with self.assertRaises(a_ ):
DisjunctiveConstraint(a_ ) # fails here
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = [[1, 2, 3], [1, 2, 4]]
_snake_case : Union[str, Any] = DisjunctiveConstraint(a_ )
        _snake_case , _snake_case , _snake_case : Optional[Any] = dc.update(1 )
_snake_case : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(a_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
        _snake_case , _snake_case , _snake_case : Union[str, Any] = dc.update(2 )
_snake_case : Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(a_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
        _snake_case , _snake_case , _snake_case : Dict = dc.update(3 )
_snake_case : Optional[Any] = stepped is True and completed is True and reset is False
self.assertTrue(a_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_snake_case : Union[str, Any] = DisjunctiveConstraint(a_ )
_snake_case : Any = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_snake_case : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_snake_case : Union[str, Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_snake_case : str = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_snake_case : Optional[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_snake_case : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_snake_case : Optional[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : int = vocab_size
_snake_case : int = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Tuple = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : Optional[Any] = rotary_value
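        # When True, rotary position embeddings are applied to the value projections as well as to queries and keys.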
_snake_case : List[str] = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
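        # Multiple-choice inputs carry an extra "choice" axis between the batch and sequence axes.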
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = 2
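    # Trial division up to sqrt(n); e.g. 60 -> [2, 2, 3, 5].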
_snake_case : List[str] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(snake_case__ )
if n > 1:
factors.append(snake_case__ )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
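        # Non-negative n drops the first n dot-separated segments of the key; negative n drops the trailing ones.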
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for old_item in old_list:
_snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
_snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
_snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
_snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
_snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
_snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ):
"""simple docstring"""
_snake_case : Dict = []
for old_item in old_list:
_snake_case : Dict = old_item
_snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
_snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case : Union[str, Any] = old_checkpoint[path]
_snake_case : Optional[int] = old_tensor.shape[0] // 3
_snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
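            # The fused qkv tensor is reshaped per attention head, then split into equal query, key and value chunks.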
_snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 )
_snake_case : Union[str, Any] = query.reshape(snake_case__ )
_snake_case : Tuple = key.reshape(snake_case__ )
_snake_case : Any = value.reshape(snake_case__ )
for path in paths:
_snake_case : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case : Optional[Any] = old_checkpoint[path["""old"""]]
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 28 | 0 |
"""simple docstring"""
import numpy as np
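# 2 / (1 + e^(-2x)) - 1, computed below, is algebraically identical to tanh(x).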
def UpperCAmelCase__ (snake_case__ : np.array ):
"""simple docstring"""
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
"""simple docstring"""
from typing import Any
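# Returns every value tied for the highest frequency, sorted; e.g. [2, 2, 3, 3, 4] -> [2, 3].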
def UpperCAmelCase__ (snake_case__ : list ):
"""simple docstring"""
if not input_list:
return []
_snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list]
_snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(snake_case__ , snake_case__ ) or not number >= 1:
        raise ValueError(
            """starting number must be an integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
_snake_case : Optional[int] = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
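        # A multiple of both 3 and 5 has already accumulated "FizzBuzz" from the two checks above.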
if 0 not in (number % 3, number % 5):
out += str(snake_case__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_vision_model"
def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = patch_size
_snake_case : Dict = image_size
_snake_case : Optional[Any] = initializer_factor
_snake_case : Any = layer_norm_eps
_snake_case : int = stop_gradient
_snake_case : Any = share_layernorm
_snake_case : List[Any] = remove_last_layer
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
            _snake_case : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_text_model"
def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : List[Any] = initializer_factor
_snake_case : Optional[int] = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[str] = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[Any] = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Dict = use_cache
_snake_case : int = pad_token_id
_snake_case : Union[str, Any] = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
@classmethod
def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower"
def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ):
'''simple docstring'''
_snake_case : str = kwargs.pop("""text_config_dict""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ )
super().__init__(**a_ )
_snake_case : str = share_cross_modal_transformer_layers
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = hidden_size
_snake_case : Union[str, Any] = initializer_factor
_snake_case : Dict = layer_norm_eps
_snake_case : Dict = share_link_tower_layers
_snake_case : Optional[int] = link_tower_type
_snake_case : Any = num_attention_heads
_snake_case : int = num_hidden_layers
_snake_case : int = tie_word_embeddings
_snake_case : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
_snake_case : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_snake_case : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_snake_case : Any = BridgeTowerTextConfig(**a_ )
_snake_case : List[Any] = BridgeTowerVisionConfig(**a_ )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : str = self.text_config.to_dict()
_snake_case : List[str] = self.vision_config.to_dict()
_snake_case : Tuple = self.__class__.model_type
return output
| 28 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
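# Make the shared test fixtures (including the custom image processor module) importable.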
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
A_ = get_tests_dir('''fixtures''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = mock.Mock()
_snake_case : List[Any] = 500
_snake_case : List[str] = {}
_snake_case : List[Any] = HTTPError
_snake_case : Optional[int] = {}
# Download this model to make sure it's in the cache.
_snake_case : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""", return_value=a_ ) as mock_head:
_snake_case : str = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[int] = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
with self.assertRaises(a_ ):
# config is in subfolder, the following should not work without specifying the subfolder
_snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""", subfolder="""feature_extractor""" )
self.assertIsNotNone(a_ )
@is_staging_test
class lowercase( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls: Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = TOKEN
HfFolder.save_token(a_ )
@classmethod
def UpperCamelCase_ ( cls: Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = ViTImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub("""test-image-processor""", use_auth_token=self._token )
_snake_case : Tuple = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_, getattr(a_, a_ ) )
# Reset repo
delete_repo(token=self._token, repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
a_, repo_id="""test-image-processor""", push_to_hub=a_, use_auth_token=self._token )
_snake_case : Optional[Any] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_, getattr(a_, a_ ) )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = ViTImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub("""valid_org/test-image-processor""", use_auth_token=self._token )
_snake_case : Union[str, Any] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_, getattr(a_, a_ ) )
# Reset repo
delete_repo(token=self._token, repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
a_, repo_id="""valid_org/test-image-processor-org""", push_to_hub=a_, use_auth_token=self._token )
_snake_case : List[Any] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_, getattr(a_, a_ ) )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
_snake_case : List[str] = CustomImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub("""test-dynamic-image-processor""", use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""}, )
_snake_case : Optional[int] = AutoImageProcessor.from_pretrained(
f"{USER}/test-dynamic-image-processor", trust_remote_code=a_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, """CustomImageProcessor""" )
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" )
return image
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
_snake_case : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Optional[int] = val
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
_snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
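        # The checkpoint stores only q and v biases, so zeros are spliced into the key slot of the fused qkv bias.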
_snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) )
_snake_case : Dict = qkv_bias
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24
_snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "opt-6.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "t5-xl" in model_name:
_snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ )
return config, image_size
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 0 |
import itertools
import math
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
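# Illustrative doctest for the 6k +/- 1 trial division above (using the un-obfuscated
# name `is_prime`, which the call site below also uses):
#   >>> [n for n in range(2, 30) if is_prime(n)]
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]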
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : int = 2
while True:
        if is_prime(num ):
yield num
num += 1
def UpperCAmelCase__ (snake_case__ : int = 1_00_01 ):
"""simple docstring"""
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
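    # Example of the flattening above (hypothetical config): the nested mapping
    #   {"model": {"classification": {"name": "mobilevit_v2"}}}
    # becomes {"model.classification.name": "mobilevit_v2"}; the dotted keys are then
    # set on the Namespace so lookups like getattr(config, "model.classification.name")
    # further below resolve.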
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
    _snake_case : List[Any] = {int(k ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
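# Worked example of the cumulative rewrites above: the checkpoint key
#   "layer_3.1.local_rep.0.block.conv.weight"
# becomes
#   "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"
# (".block." -> ".", ".conv." -> ".convolution.", then the local_rep prefix swap).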
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
for k in keys_to_ignore:
        state_dict.pop(k , None )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
    # remove and rename some keys in the original state dict
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
        '''Name of the task the MobileViTV2 model you\'d like to convert was trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 0 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
A_ = pytest.mark.integration
A_ = {'''comet'''}
A_ = importlib.util.find_spec('''fairseq''') is not None
A_ = {'''code_eval'''}
A_ = os.name == '''nt'''
A_ = {'''bertscore''', '''frugalscore''', '''perplexity'''}
A_ = importlib.util.find_spec('''transformers''') is not None
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
@wraps(snake_case__ )
def wrapper(self : Dict , snake_case__ : Optional[Any] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , snake_case__ )
return wrapper
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
@wraps(snake_case__ )
def wrapper(self : Optional[Any] , snake_case__ : str ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , snake_case__ )
return wrapper
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
@wraps(snake_case__ )
def wrapper(self : Dict , snake_case__ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , snake_case__ )
return wrapper
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
__a , __a , __a )
@local
class lowercase( parameterized.TestCase ):
'''simple docstring'''
lowercase__ = {}
lowercase__ = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def UpperCamelCase_ ( self: List[str], a_: List[str] ):
'''simple docstring'''
_snake_case : Any = """[...]"""
_snake_case : str = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""", a_ ) ).module_path )
_snake_case : List[str] = datasets.load.import_main_class(metric_module.__name__, dataset=a_ )
# check parameters
_snake_case : str = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(a_, metric_module.__name__ ):
with self.use_local_metrics():
try:
_snake_case : int = doctest.testmod(a_, verbose=a_, raise_on_error=a_ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed, 0 )
self.assertGreater(results.attempted, 1 )
@slow
def UpperCamelCase_ ( self: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : Tuple = """[...]"""
_snake_case : str = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""", a_ ) ).module_path )
# run doctest
with self.use_local_metrics():
_snake_case : int = doctest.testmod(a_, verbose=a_, raise_on_error=a_ )
self.assertEqual(results.failed, 0 )
self.assertGreater(results.attempted, 1 )
@contextmanager
def UpperCamelCase_ ( self: List[Any], a_: Tuple, a_: Dict ):
'''simple docstring'''
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](a_ ):
yield
else:
yield
@contextmanager
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def load_local_metric(a_: Any, *a_: List[Any], **a_: Optional[Any] ):
return load_metric(os.path.join("""metrics""", a_ ), *a_, **a_ )
with patch("""datasets.load_metric""" ) as mock_load_metric:
_snake_case : str = load_local_metric
yield
@classmethod
def UpperCamelCase_ ( cls: List[str], a_: Union[str, Any] ):
'''simple docstring'''
def wrapper(a_: int ):
_snake_case : str = contextmanager(a_ )
_snake_case : Union[str, Any] = patcher
return patcher
return wrapper
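# Registration pattern used below (sketch): each decorated function is wrapped in
# contextmanager() and stored under its metric name, so the patching helper at the
# top of the class can later do
#   with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name): ...
# while the doctests run.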
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class lowercase( __a ):
'''simple docstring'''
def UpperCamelCase_ ( self: int, a_: Union[str, Any] ):
'''simple docstring'''
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
_snake_case : Union[str, Any] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
import torch
def bert_cos_score_idf(snake_case__ : Optional[Any] , snake_case__ : List[Any] , *snake_case__ : List[str] , **snake_case__ : int ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case__ ) )
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
_snake_case : Union[str, Any] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
def load_from_checkpoint(snake_case__ : Dict ):
class lowercase:
'''simple docstring'''
def UpperCamelCase_ ( self: Optional[int], a_: Union[str, Any], *a_: Optional[Any], **a_: Dict ):
'''simple docstring'''
assert len(a_ ) == 2
_snake_case : Optional[int] = [0.19, 0.92]
return scores, sum(a_ ) / len(a_ )
return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a real model
with patch("""comet.download_model""" ) as mock_download_model:
_snake_case : Any = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
_snake_case : Union[str, Any] = load_from_checkpoint
yield
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Dict = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
_snake_case : List[str] = """ERROR"""
_snake_case : int = F"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(snake_case__ , match=re.escape(snake_case__ ) ):
metric.compute(predictions=[] , references=[] , scheme=snake_case__ )
| 705 |
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a_, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a_, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case : Union[str, Any] = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a_, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" )
self.assertEqual(a_, """\nCONSTANT = None\n""" )
_snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" )
self.assertEqual(
a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""], a_ )
| 28 | 0 |
"""simple docstring"""
from math import ceil
def UpperCAmelCase__ (snake_case__ : int = 10_01 ):
"""simple docstring"""
_snake_case : Any = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
_snake_case : int = 2 * i + 1
_snake_case : List[str] = 2 * i
_snake_case : List[Any] = total + 4 * odd**2 - 6 * even
return total
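# Why 4 * odd**2 - 6 * even (sketch): the four corners of ring i of the number
# spiral are (2i+1)**2, (2i+1)**2 - 2i, (2i+1)**2 - 4i and (2i+1)**2 - 6i, which
# sum to 4*(2i+1)**2 - 12i = 4 * odd**2 - 6 * even with odd = 2i + 1, even = 2i.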
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
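# Note: the _LazyModule indirection above defers the heavy torch / vision imports
# until an attribute is first accessed, e.g. importing OwlViTModel from this module
# only triggers the modeling import at that point.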
| 28 | 0 |
"""simple docstring"""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
A_ = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "facebook/nllb-200-distilled-600M"
lowercase__ = (
"This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
"be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
"which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
"plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
)
lowercase__ = "translator"
lowercase__ = AutoTokenizer
lowercase__ = AutoModelForSeqaSeqLM
lowercase__ = LANGUAGE_CODES
lowercase__ = ["text", "text", "text"]
lowercase__ = ["text"]
def UpperCamelCase_ ( self: str, a_: int, a_: Any, a_: Dict ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"{src_lang} is not a supported language." )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"{tgt_lang} is not a supported language." )
_snake_case : List[Any] = self.lang_to_code[src_lang]
_snake_case : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
a_, return_tensors="""pt""", src_lang=a_, tgt_lang=a_ )
def UpperCamelCase_ ( self: List[str], a_: Optional[Any] ):
'''simple docstring'''
return self.model.generate(**a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=a_ )
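# Usage sketch (hypothetical variable names; assumes the callable PipelineTool
# runtime and the un-obfuscated class name TranslationTool):
#   translator = TranslationTool()
#   translator("Bonjour", src_lang="French", tgt_lang="English")
# encode() maps the plain-English names through LANGUAGE_CODES ("French" -> "fra_Latn").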
| 707 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ):
"""simple docstring"""
def run_func(snake_case__ : Tuple ):
@wraps(snake_case__ )
def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ):
return func(*snake_case__ , **snake_case__ )
@wraps(snake_case__ )
@tf.function(experimental_compile=snake_case__ )
def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ):
return func(*snake_case__ , **snake_case__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
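# Usage sketch for the decorator factory above (mirroring the call sites further down,
# e.g. run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)):
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward(): ...
# wraps `forward` in tf.function(experimental_compile=True); in eager mode the function
# is returned as-is, and eager mode combined with XLA raises a ValueError.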
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = random.Random()
_snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
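# e.g. random_input_ids(2, 8, 30000) (batch size, sequence length, vocab size; argument
# names assumed from the un-obfuscated script) yields an int32 tensor of shape (2, 8)
# with token ids drawn uniformly from [0, vocab_size).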
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = "TensorFlow"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for TPU
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 0 |
"""simple docstring"""
A_ = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 708 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = int(snake_case__ )
# Initialize Result
_snake_case : str = []
    # Traverse through all denominations
for denomination in reversed(snake_case__ ):
# Find denominations
        while total_value >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination ) # append one unit of this denomination
return answer
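# Worked example (greedy is optimal here because the INR denominations form a
# canonical coin system; for arbitrary denominations it may not be):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]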
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
        input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
class lowercase( __a , __a ):
'''simple docstring'''
@register_to_config
def __init__( self: List[Any], a_: int = 16, a_: int = 88, a_: Optional[int] = None, a_: Optional[int] = None, a_: int = 1, a_: float = 0.0, a_: int = 32, a_: Optional[int] = None, a_: bool = False, a_: Optional[int] = None, a_: str = "geglu", a_: bool = True, a_: bool = True, ):
'''simple docstring'''
super().__init__()
_snake_case : List[str] = num_attention_heads
_snake_case : int = attention_head_dim
_snake_case : Optional[int] = num_attention_heads * attention_head_dim
_snake_case : Any = in_channels
_snake_case : List[str] = torch.nn.GroupNorm(num_groups=a_, num_channels=a_, eps=1E-6, affine=a_ )
_snake_case : Dict = nn.Linear(a_, a_ )
# 3. Define transformers blocks
_snake_case : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
a_, a_, a_, dropout=a_, cross_attention_dim=a_, activation_fn=a_, attention_bias=a_, double_self_attention=a_, norm_elementwise_affine=a_, )
for d in range(a_ )
] )
_snake_case : Tuple = nn.Linear(a_, a_ )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: List[str]=None, a_: Union[str, Any]=None, a_: Tuple=None, a_: Any=1, a_: int=None, a_: bool = True, ):
'''simple docstring'''
_snake_case : Any = hidden_states.shape
_snake_case : List[str] = batch_frames // num_frames
_snake_case : Tuple = hidden_states
_snake_case : List[str] = hidden_states[None, :].reshape(a_, a_, a_, a_, a_ )
_snake_case : Dict = hidden_states.permute(0, 2, 1, 3, 4 )
_snake_case : Tuple = self.norm(a_ )
_snake_case : Optional[int] = hidden_states.permute(0, 3, 4, 2, 1 ).reshape(batch_size * height * width, a_, a_ )
_snake_case : Any = self.proj_in(a_ )
# 2. Blocks
for block in self.transformer_blocks:
_snake_case : List[str] = block(
a_, encoder_hidden_states=a_, timestep=a_, cross_attention_kwargs=a_, class_labels=a_, )
# 3. Output
_snake_case : Tuple = self.proj_out(a_ )
_snake_case : List[str] = (
hidden_states[None, None, :]
.reshape(a_, a_, a_, a_, a_ )
.permute(0, 3, 4, 1, 2 )
.contiguous()
)
_snake_case : List[Any] = hidden_states.reshape(a_, a_, a_, a_ )
_snake_case : List[str] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=a_ )
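# Shape flow through forward() above (sketch): the input (batch*frames, C, H, W) is
# reshaped to (batch, frames, C, H, W), group-normalized, then flattened to
# (batch*H*W, frames, C) so the transformer blocks attend across the frame axis;
# the inverse permute/reshape restores (batch*frames, C, H, W) before the residual add.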
| 709 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
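        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, plus the [CLS]
        # token -> seq_length 226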
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
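        # with image_size=30 and patch_size=2 the backbone yields a 15x15 feature grid; the decode head's 4x upsampling therefore gives logits of spatial size image_size * 2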
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
| 28 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = XLMProphetNetTokenizer
lowercase__ = False
lowercase__ = True
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : Tuple = XLMProphetNetTokenizer(a_, keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Tuple = """[PAD]"""
_snake_case : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """[PAD]""" )
self.assertEqual(vocab_keys[1], """[CLS]""" )
self.assertEqual(vocab_keys[-1], """j""" )
self.assertEqual(len(a_ ), 1_012 )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_012 )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = XLMProphetNetTokenizer(a_, keep_accents=a_ )
_snake_case : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
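        # SentencePiece ids are shifted by fairseq_offset so the first vocab slots stay reserved for the special tokens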
_snake_case : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
_snake_case : int = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
], )
_snake_case : str = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
], )
@cached_property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = """Hello World!"""
_snake_case : Optional[int] = [35_389, 6_672, 49, 2]
self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) )
@slow
def UpperCamelCase_ ( self: str ):
        '''simple docstring'''
        # fmt: off
_snake_case : Tuple = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_, model_name="""microsoft/xprophetnet-large-wiki100-cased""", revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""", )
| 710 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase( __a ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
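        # note: the sampling loop below runs twice; IPNDM keeps a history of past residuals (ets), so the second pass continues from the warmed-up multistep state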
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
A_ = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_snake_case : Any = []
for num in range(len(snake_case__ ) ):
_snake_case : Optional[int] = 0
while 2 * i * i <= odd_composites[num]:
_snake_case : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(snake_case__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(snake_case__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowercase( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[Any], a_: Union[str, Any], a_: Tuple=7, a_: List[Any]=3, a_: int=30, a_: Dict=400, a_: str=True, a_: Optional[int]=None, a_: int=True, a_: Optional[Any]=[0.5, 0.5, 0.5], a_: Any=[0.5, 0.5, 0.5], a_: List[Any]=True, a_: Union[str, Any]=1 / 255, a_: List[Any]=True, ):
'''simple docstring'''
_snake_case : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333}
_snake_case : Dict = parent
_snake_case : Any = batch_size
_snake_case : str = num_channels
_snake_case : List[Any] = min_resolution
_snake_case : Tuple = max_resolution
_snake_case : Union[str, Any] = do_resize
_snake_case : List[str] = size
_snake_case : Union[str, Any] = do_normalize
_snake_case : List[Any] = image_mean
_snake_case : List[str] = image_std
_snake_case : Optional[int] = do_rescale
_snake_case : List[str] = rescale_factor
_snake_case : List[Any] = do_pad
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self: Any, a_: Any, a_: Tuple=False ):
'''simple docstring'''
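        # mirror the processor's shortest-edge/longest-edge resize to compute the expected output height and width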
if not batched:
_snake_case : int = image_inputs[0]
if isinstance(a_, Image.Image ):
                _snake_case , _snake_case : Optional[int] = image.size
else:
                _snake_case , _snake_case : Dict = image.shape[1], image.shape[2]
if w < h:
_snake_case : List[Any] = int(self.size["""shortest_edge"""] * h / w )
_snake_case : str = self.size["""shortest_edge"""]
elif w > h:
_snake_case : Any = self.size["""shortest_edge"""]
_snake_case : Dict = int(self.size["""shortest_edge"""] * w / h )
else:
_snake_case : Dict = self.size["""shortest_edge"""]
_snake_case : Dict = self.size["""shortest_edge"""]
else:
_snake_case : Union[str, Any] = []
for image in image_inputs:
_snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(a_, key=lambda a_ : item[0] )[0]
_snake_case : Union[str, Any] = max(a_, key=lambda a_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_, """image_mean""" ) )
self.assertTrue(hasattr(a_, """image_std""" ) )
self.assertTrue(hasattr(a_, """do_normalize""" ) )
self.assertTrue(hasattr(a_, """do_resize""" ) )
self.assertTrue(hasattr(a_, """size""" ) )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 1_333} )
self.assertEqual(image_processor.do_pad, a_ )
_snake_case : Dict = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=a_ )
self.assertEqual(image_processor.size, {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad, a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_, Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
_snake_case : Dict = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case : Dict = self.image_processor_tester.get_expected_values(a_, batched=a_ )
_snake_case : Any = image_processing(a_, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, np.ndarray )
# Test not batched input
_snake_case : Any = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
_snake_case : List[Any] = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case : Tuple = image_processing(a_, return_tensors="""pt""" ).pixel_values
_snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(a_, batched=a_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, torch.Tensor )
# Test not batched input
_snake_case : Dict = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
_snake_case : str = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case : List[str] = image_processing(a_, return_tensors="""pt""" ).pixel_values
_snake_case : List[str] = self.image_processor_tester.get_expected_values(a_, batched=a_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
_snake_case : Dict = self.image_processing_class(do_resize=a_, do_normalize=a_, do_rescale=a_ )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_snake_case : int = image_processing_a.pad(a_, return_tensors="""pt""" )
_snake_case : int = image_processing_a(a_, return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""], encoded_images["""pixel_values"""], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""", """r""" ) as f:
_snake_case : int = json.loads(f.read() )
_snake_case : int = {"""image_id""": 39_769, """annotations""": target}
# encode them
_snake_case : Optional[Any] = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
_snake_case : int = image_processing(images=a_, annotations=a_, return_tensors="""pt""" )
# verify pixel values
_snake_case : Any = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape, a_ )
_snake_case : int = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], a_, atol=1E-4 ) )
# verify area
_snake_case : List[str] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], a_ ) )
# verify boxes
_snake_case : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, a_ )
_snake_case : Union[str, Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], a_, atol=1E-3 ) )
# verify image_id
_snake_case : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], a_ ) )
# verify is_crowd
_snake_case : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], a_ ) )
# verify class_labels
_snake_case : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], a_ ) )
# verify orig_size
_snake_case : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], a_ ) )
# verify size
_snake_case : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], a_ ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""", """r""" ) as f:
_snake_case : Any = json.loads(f.read() )
_snake_case : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target}
_snake_case : List[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_snake_case : List[Any] = YolosImageProcessor(format="""coco_panoptic""" )
_snake_case : str = image_processing(images=a_, annotations=a_, masks_path=a_, return_tensors="""pt""" )
# verify pixel values
_snake_case : Union[str, Any] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape, a_ )
_snake_case : Union[str, Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], a_, atol=1E-4 ) )
# verify area
_snake_case : Any = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], a_ ) )
# verify boxes
_snake_case : int = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, a_ )
_snake_case : str = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], a_, atol=1E-3 ) )
# verify image_id
_snake_case : int = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], a_ ) )
# verify is_crowd
_snake_case : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], a_ ) )
# verify class_labels
_snake_case : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], a_ ) )
# verify masks
_snake_case : Tuple = 822_873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item(), a_ )
# verify orig_size
_snake_case : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], a_ ) )
# verify size
_snake_case : List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], a_ ) )
| 712 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
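    # reimplements CLIP preprocessing with differentiable torchvision ops so gradients can flow back to the image tensor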
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
_snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
_snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
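        # apply the learned offset to the current latent and, if enabled, re-quantize through the VQGAN codebook before decoding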
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
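        # contrastive objective: maximize similarity to the positive prompts while penalizing similarity to the negative ones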
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
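        # prompts may arrive as a single "a | b | c" string, as (text, weight) tuples, or as "text:weight" strings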
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ = logging.get_logger(__name__)
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["audio_values", "audio_mask"]
def __init__( self: Dict, a_: Tuple=2_048, a_: str=1, a_: Any=[16, 16], a_: str=128, a_: int=44_100, a_: Optional[Any]=86, a_: int=2_048, a_: Optional[Any]=0.0, **a_: Union[str, Any], ):
'''simple docstring'''
super().__init__(
feature_size=a_, sampling_rate=a_, padding_value=a_, **a_, )
_snake_case : Optional[int] = spectrogram_length
_snake_case : Dict = num_channels
_snake_case : int = patch_size
_snake_case : Any = feature_size // self.patch_size[1]
_snake_case : List[Any] = n_fft
_snake_case : List[str] = sampling_rate // hop_length_to_sampling_rate
_snake_case : int = sampling_rate
_snake_case : Dict = padding_value
_snake_case : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=a_, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=a_, norm="""slaney""", mel_scale="""slaney""", ).T
def UpperCamelCase_ ( self: Optional[int], a_: np.array ):
'''simple docstring'''
_snake_case : List[Any] = spectrogram(
a_, window_function(self.n_fft, """hann""" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="""dB""", db_range=80.0, )
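        # drop the last frame, shift by -20 dB, then rescale and clamp the log-mel spectrogram into [-1, 1]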
_snake_case : Dict = log_spec[:, :-1]
_snake_case : Union[str, Any] = log_spec - 20.0
_snake_case : int = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self: Any, a_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], a_: Optional[Union[str, TensorType]] = None, a_: Optional[bool] = True, a_: Optional[int] = None, a_: bool = False, a_: bool = False, **a_: str, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
f" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_snake_case : str = isinstance(a_, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
_snake_case : Optional[Any] = is_batched_numpy or (
isinstance(a_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
_snake_case : List[str] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a_, np.ndarray ):
_snake_case : int = np.asarray(a_, dtype=np.floataa )
elif isinstance(a_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_snake_case : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_snake_case : List[str] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_snake_case : Dict = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], a_ ):
_snake_case : str = [np.asarray(a_, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_snake_case : Any = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_snake_case : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_snake_case : Optional[int] = np.array(a_ ).astype(np.floataa )
# convert into correct format for padding
_snake_case : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_snake_case : Optional[Any] = np.ones([len(a_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_snake_case : Any = padded_audio_features * self.padding_value
for i in range(len(a_ ) ):
_snake_case : Optional[Any] = audio_features[i]
_snake_case : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
_snake_case : Dict = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
_snake_case : Optional[Any] = {"""audio_values""": padded_audio_features}
_snake_case : int = BatchFeature(data=a_, tensor_type=a_ )
return encoded_inputs
| 713 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_snake_case : Dict = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
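# Hedged illustration of the Brian Kernighan trick used above: `number &= number - 1`
# clears the lowest set bit, so the loop runs once per 1-bit rather than once per bit
# position. Standalone re-statement with our own name, since the identifiers above are
# obfuscated.
def count_set_bits(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # 0b1011 -> 0b1010 -> 0b1000 -> 0b0000
        count += 1
    return count

assert count_set_bits(0b1011) == 3
assert count_set_bits(0) == 0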
| 28 | 0 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Optional[int], *a_: Optional[Any], **a_: Dict ):
'''simple docstring'''
super().__init__(*a_, **a_ )
_snake_case : int = {}
def UpperCamelCase_ ( self: Union[str, Any], a_: Dict, *a_: Any, **a_: List[Any] ):
'''simple docstring'''
_snake_case : Dict = super().add_tokens(a_, *a_, **a_ )
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCamelCase_ ( self: Optional[int], a_: Tuple, *a_: Dict, a_: int=1, **a_: Any ):
'''simple docstring'''
_snake_case : str = []
if num_vec_per_token == 1:
self.try_adding_tokens(a_, *a_, **a_ )
output.append(a_ )
else:
_snake_case : int = []
for i in range(a_ ):
_snake_case : Union[str, Any] = placeholder_token + f"_{i}"
self.try_adding_tokens(a_, *a_, **a_ )
output.append(a_ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"The tokenizer already has placeholder token {token} that can get confused with"
f" {placeholder_token}keep placeholder tokens independent" )
_snake_case : Tuple = output
def UpperCamelCase_ ( self: Tuple, a_: Optional[Any], a_: Dict=False, a_: Dict=1.0 ):
'''simple docstring'''
if isinstance(a_, a_ ):
_snake_case : Tuple = []
for i in range(len(a_ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=a_ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
_snake_case : str = self.token_map[placeholder_token]
_snake_case : Optional[int] = tokens[: 1 + int(len(a_ ) * prop_tokens_to_load )]
if vector_shuffle:
_snake_case : Any = copy.copy(a_ )
random.shuffle(a_ )
_snake_case : str = text.replace(a_, """ """.join(a_ ) )
return text
def __call__( self: Union[str, Any], a_: str, *a_: Union[str, Any], a_: int=False, a_: Dict=1.0, **a_: int ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
a_, vector_shuffle=a_, prop_tokens_to_load=a_ ), *a_, **a_, )
def UpperCamelCase_ ( self: Optional[int], a_: Optional[Any], *a_: Dict, a_: Optional[Any]=False, a_: Optional[Any]=1.0, **a_: Union[str, Any] ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
a_, vector_shuffle=a_, prop_tokens_to_load=a_ ), *a_, **a_, )
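# Hedged, self-contained sketch of the placeholder-expansion logic implemented above
# (the class appears to mirror diffusers' MultiTokenCLIPTokenizer for multi-vector
# textual inversion — an assumption): one placeholder maps to N sub-tokens, which
# replace it in the text, optionally shuffled, before ordinary tokenization. Names are ours.
import copy
import random

token_map = {"<cat-toy>": [f"<cat-toy>_{i}" for i in range(4)]}

def expand_placeholders(text: str, vector_shuffle: bool = False) -> str:
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            if vector_shuffle:
                tokens = copy.copy(tokens)
                random.shuffle(tokens)
            text = text.replace(placeholder, " ".join(tokens))
    return text

print(expand_placeholders("a photo of <cat-toy>"))
# -> "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"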
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
| 28 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ = logging.getLogger(__name__)
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowercase__ = field(
default=__a , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase__ = field(
default=__a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowercase__ = field(
default=__a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
lowercase__ = field(metadata={"help": "Should contain the data files for the task."} )
lowercase__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase__ = field(
default=__a , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_snake_case : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , snake_case__ )
# Set seed
set_seed(training_args.seed )
try:
_snake_case : int = processors[data_args.task_name]()
_snake_case : Any = processor.get_labels()
_snake_case : int = len(snake_case__ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_snake_case : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_snake_case : Optional[int] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , )
# Get datasets
_snake_case : List[str] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=snake_case__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_snake_case : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=snake_case__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(snake_case__ : EvalPrediction ) -> Dict:
_snake_case : List[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(snake_case__ , p.label_ids )}
# Data collator
_snake_case : Any = DataCollatorWithPadding(snake_case__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_snake_case : int = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_snake_case : Optional[int] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_snake_case : Optional[int] = trainer.evaluate()
_snake_case : int = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(snake_case__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , snake_case__ , snake_case__ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(snake_case__ )
return results
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
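# Hedged sketch of the metric wired into the Trainer above: predictions arrive as
# logits of shape (batch, num_choices); argmax over choices is compared against the
# gold label ids, exactly as compute_metrics/simple_accuracy do. Data is fabricated.
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # fake multiple-choice logits
labels = np.array([1, 0, 0])
preds = np.argmax(logits, axis=1)
print({"acc": (preds == labels).mean()})  # {'acc': 0.666...}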
| 715 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
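# Hedged end-to-end sketch of the round trip the tests above exercise: write a small
# datasets.Dataset to Parquet with ParquetDatasetWriter, then load it back with
# ParquetDatasetReader. The temp-dir usage is illustrative.
import os
import tempfile

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "demo.parquet")
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    assert ParquetDatasetWriter(ds, path).write() > 0
    reloaded = ParquetDatasetReader(path).read()
    assert reloaded.column_names == ["col_1", "col_2"]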
| 28 | 0 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
A_ = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a
different length from the reference word sequence (supposedly the correct one). The WER is derived from the
Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for
comparing different systems as well as for evaluating improvements within one system. This kind of measurement,
however, provides no details on the nature of translation errors and further work is therefore required to
identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence
using dynamic string alignment. Examination of this issue is seen through a theory called the power law that
states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
A_ = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""string""", id="""sequence""" ),
"""references""": datasets.Value("""string""", id="""sequence""" ),
} ), codebase_urls=["""https://github.com/jitsi/jiwer/"""], reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
], )
def UpperCamelCase_ ( self: Optional[int], a_: Any=None, a_: int=None, a_: str=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(a_, a_ )["wer"]
else:
_snake_case : int = 0
_snake_case : int = 0
for prediction, reference in zip(a_, a_ ):
_snake_case : Optional[int] = compute_measures(a_, a_ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
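# Hedged worked example of the docstring formula WER = (S + D + I) / (S + D + C),
# computed by hand for one pair. reference: "the cat sat", prediction:
# "the cat sat down" -> 0 substitutions, 0 deletions, 1 insertion, 3 hits.
S, D, I, C = 0, 0, 1, 3
print((S + D + I) / (S + D + C))  # 0.333..., one error per three reference words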
| 716 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
| 28 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
while a != 0:
_snake_case , _snake_case : Tuple = b % a, a
return b
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
if gcd(snake_case__ , snake_case__ ) != 1:
_snake_case : Union[str, Any] = F"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(snake_case__ )
_snake_case , _snake_case , _snake_case : str = 1, 0, a
_snake_case , _snake_case , _snake_case : List[str] = 0, 1, m
while va != 0:
_snake_case : Union[str, Any] = ua // va
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
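# Hedged self-check for the extended-Euclidean modular inverse above: for coprime a
# and m, the inverse x satisfies (a * x) % m == 1. Plain re-statement with our own
# names, since the identifiers above are collapsed by obfuscation.
def mod_inverse(a: int, m: int) -> int:
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m

assert (7 * mod_inverse(7, 26)) % 26 == 1  # 7**-1 mod 26 == 15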
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ = [ord(letter) for letter in string.ascii_lowercase]
A_ = {ord(char) for char in VALID_CHARS}
A_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ):
"""simple docstring"""
_snake_case : str = ""
_snake_case : int
_snake_case : int
_snake_case : int
for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ):
_snake_case : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case__ )
return decoded
def UpperCAmelCase__ (snake_case__ : list[int] ):
"""simple docstring"""
_snake_case : list[str] = []
for key in product(snake_case__ , repeat=3 ):
_snake_case : List[Any] = try_key(snake_case__ , snake_case__ )
if encoded is not None:
possibles.append(snake_case__ )
return possibles
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
_snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )]
_snake_case : Optional[Any] = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_snake_case : Optional[int] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
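# Hedged toy demonstration of the XOR strategy above: the ciphertext is XORed with a
# cycling 3-letter key, and a wrong key tends to produce characters outside VALID_CHARS,
# which is how try_key / filter_valid_chars prune the candidate keys.
from itertools import cycle

plaintext = "an example message"
key = "abc"
cipher = [ord(p) ^ ord(k) for p, k in zip(plaintext, cycle(key))]
decoded = "".join(chr(c ^ ord(k)) for c, k in zip(cipher, cycle(key)))
assert decoded == plaintext  # XORing twice with the same key is the identity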
| 28 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=snake_case__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=snake_case__ )
return parser.parse_args()
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : int = parse_args()
# Import training_script as a module.
_snake_case : int = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_snake_case : List[str] = script_fpath.stem
_snake_case : Any = importlib.import_module(snake_case__ )
# Patch sys.argv
_snake_case : int = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
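# Hedged invocation sketch for the TPU launcher above (script names are illustrative):
#
#   python xla_spawn.py --num_cores 8 train_script.py --per_device_train_batch_size 8
#
# The launcher imports `train_script` as a module, appends `--tpu_num_cores 8` to
# sys.argv, and spawns `mod._mp_fn` once per TPU core via xmp.spawn.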
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "feature_extractor"]
lowercase__ = "TvltImageProcessor"
lowercase__ = "TvltFeatureExtractor"
def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=a_, feature_extractor=a_ )
_snake_case : Any = image_processor
_snake_case : Dict = feature_extractor
def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_snake_case : Optional[int] = None
if images is not None:
_snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ )
if images_mixed is not None:
_snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ )
if audio is not None:
_snake_case : Any = self.feature_extractor(
a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ )
_snake_case : List[str] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
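# Hedged usage sketch for the processor above, which routes `images` to the image
# processor, `audio` to the feature extractor, and merges the resulting dicts.
# The checkpoint id is an assumption and the arrays are random placeholders.
#
# import numpy as np
# from transformers import TvltProcessor
#
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
# audio = np.random.randn(44_100).astype(np.float32)
# images = [np.random.randint(0, 255, (3, 224, 224), dtype=np.uint8)]
# batch = processor(images=images, audio=audio, sampling_rate=44_100)
# print(sorted(batch.keys()))  # e.g. audio_mask, audio_values, pixel_values, ...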
| 28 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str=0.9_99 , snake_case__ : Dict="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : Optional[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
_snake_case : List[Any] = []
for i in range(snake_case__ ):
_snake_case : Tuple = i / num_diffusion_timesteps
_snake_case : Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
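# Hedged standalone re-computation of the cosine schedule above: beta_t is
# 1 - alpha_bar((t+1)/T) / alpha_bar(t/T) with alpha_bar(t) = cos((t + 0.008)/1.008 * pi/2)**2,
# capped at max_beta, so the betas grow monotonically toward the cap. Names are ours.
import math

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list:
    alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]

betas = cosine_betas(10)
assert 0 < betas[0] < betas[-1] <= 0.999  # betas rise toward the max_beta cap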
class lowercase( __a , __a ):
'''simple docstring'''
lowercase__ = [e.name for e in KarrasDiffusionSchedulers]
lowercase__ = 2
@register_to_config
def __init__( self: Tuple, a_: int = 1_000, a_: float = 0.00_085, a_: float = 0.012, a_: str = "linear", a_: Optional[Union[np.ndarray, List[float]]] = None, a_: str = "epsilon", a_: str = "linspace", a_: int = 0, ):
'''simple docstring'''
if trained_betas is not None:
_snake_case : Tuple = torch.tensor(a_, dtype=torch.floataa )
elif beta_schedule == "linear":
_snake_case : Dict = torch.linspace(a_, a_, a_, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_snake_case : List[str] = (
torch.linspace(beta_start**0.5, beta_end**0.5, a_, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_snake_case : str = betas_for_alpha_bar(a_ )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
_snake_case : int = 1.0 - self.betas
_snake_case : Optional[Any] = torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(a_, a_, a_ )
def UpperCamelCase_ ( self: Tuple, a_: List[str], a_: Dict=None ):
'''simple docstring'''
if schedule_timesteps is None:
_snake_case : List[Any] = self.timesteps
_snake_case : Any = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_snake_case : List[Any] = 1 if len(a_ ) > 1 else 0
else:
_snake_case : int = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep
_snake_case : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase_ ( self: Optional[Any], a_: torch.FloatTensor, a_: Union[float, torch.FloatTensor], ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.index_for_timestep(a_ )
if self.state_in_first_order:
_snake_case : List[str] = self.sigmas[step_index]
else:
_snake_case : Any = self.sigmas_interpol[step_index]
_snake_case : List[str] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase_ ( self: List[str], a_: int, a_: Union[str, torch.device] = None, a_: Optional[int] = None, ):
'''simple docstring'''
_snake_case : Union[str, Any] = num_inference_steps
_snake_case : Optional[int] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_snake_case : Optional[int] = np.linspace(0, num_train_timesteps - 1, a_, dtype=a_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_snake_case : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_snake_case : int = (np.arange(0, a_ ) * step_ratio).round()[::-1].copy().astype(a_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_snake_case : Optional[int] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_snake_case : Tuple = (np.arange(a_, 0, -step_ratio )).round().copy().astype(a_ )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
_snake_case : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_snake_case : Dict = torch.from_numpy(np.log(a_ ) ).to(a_ )
_snake_case : int = np.interp(a_, np.arange(0, len(a_ ) ), a_ )
_snake_case : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_snake_case : str = torch.from_numpy(a_ ).to(device=a_ )
# interpolate sigmas
_snake_case : int = sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp()
_snake_case : Optional[int] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_snake_case : Optional[Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(a_ ).startswith("""mps""" ):
# mps does not support float64
_snake_case : Dict = torch.from_numpy(a_ ).to(a_, dtype=torch.floataa )
else:
_snake_case : Optional[Any] = torch.from_numpy(a_ ).to(a_ )
# interpolate timesteps
_snake_case : Tuple = self.sigma_to_t(a_ ).to(a_, dtype=timesteps.dtype )
_snake_case : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten()
_snake_case : List[str] = torch.cat([timesteps[:1], interleaved_timesteps] )
_snake_case : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_snake_case : List[str] = defaultdict(a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple ):
'''simple docstring'''
_snake_case : Tuple = sigma.log()
# get distribution
_snake_case : Any = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_snake_case : int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_snake_case : Dict = low_idx + 1
_snake_case : List[str] = self.log_sigmas[low_idx]
_snake_case : int = self.log_sigmas[high_idx]
# interpolate sigmas
_snake_case : Any = (low - log_sigma) / (low - high)
_snake_case : Dict = w.clamp(0, 1 )
# transform interpolation to time range
_snake_case : Tuple = (1 - w) * low_idx + w * high_idx
_snake_case : List[Any] = t.view(sigma.shape )
return t
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return self.sample is None
def UpperCamelCase_ ( self: Any, a_: Union[torch.FloatTensor, np.ndarray], a_: Union[float, torch.FloatTensor], a_: Union[torch.FloatTensor, np.ndarray], a_: bool = True, ):
'''simple docstring'''
_snake_case : Any = self.index_for_timestep(a_ )
# advance index counter by 1
_snake_case : List[str] = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_snake_case : Union[str, Any] = self.sigmas[step_index]
_snake_case : Optional[Any] = self.sigmas_interpol[step_index + 1]
_snake_case : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_snake_case : int = self.sigmas[step_index - 1]
_snake_case : Any = self.sigmas_interpol[step_index]
_snake_case : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_snake_case : Optional[int] = 0
_snake_case : List[str] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_snake_case : str = sigma_hat if self.state_in_first_order else sigma_interpol
_snake_case : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_snake_case : Dict = sigma_hat if self.state_in_first_order else sigma_interpol
_snake_case : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_snake_case : int = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_snake_case : Tuple = sigma_interpol - sigma_hat
# store for 2nd order step
_snake_case : str = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_snake_case : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_snake_case : Any = sigma_next - sigma_hat
_snake_case : Optional[int] = self.sample
_snake_case : Optional[Any] = None
_snake_case : Union[str, Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: torch.FloatTensor, a_: torch.FloatTensor, a_: torch.FloatTensor, ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a_ ):
# mps does not support float64
            _snake_case : Dict = self.timesteps.to(original_samples.device, dtype=torch.float32 )
            _snake_case : List[Any] = timesteps.to(original_samples.device, dtype=torch.float32 )
else:
_snake_case : Optional[Any] = self.timesteps.to(original_samples.device )
_snake_case : Any = timesteps.to(original_samples.device )
_snake_case : List[str] = [self.index_for_timestep(a_, a_ ) for t in timesteps]
_snake_case : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_snake_case : List[Any] = sigma.unsqueeze(-1 )
_snake_case : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self: Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
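# --- Illustrative sketch (not part of the original file) ---------------------
# The scheduler above builds its second-order grid by interpolating sigmas in
# log space: sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp() takes the
# geometric midpoint of consecutive noise levels, which keeps every
# interpolated sigma positive. A self-contained demo with made-up sigmas:
import torch

sigmas = torch.tensor([14.6, 8.0, 4.0, 2.0, 1.0])
# geometric midpoints; index 0 wraps around and is discarded, as in the scheduler
sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
assert torch.allclose(sigmas_interpol[1], torch.sqrt(sigmas[0] * sigmas[1]))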
| 719 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
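# --- Illustrative sketch (not part of the original tests) --------------------
# The expected ids in the Unicode test above follow ByT5's byte-level scheme:
# each UTF-8 byte maps to (byte_value + 3), with ids 0/1/2 reserved for
# pad/eos/unk and 1 acting as </s>. Reproducible without any tokenizer:
text = "Unicode €."
ids = [b + 3 for b in text.encode("utf-8")] + [1]  # append </s>
assert ids == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]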
| 28 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''distilbert-base-uncased''': 5_12,
'''distilbert-base-uncased-distilled-squad''': 5_12,
'''distilbert-base-cased''': 5_12,
'''distilbert-base-cased-distilled-squad''': 5_12,
'''distilbert-base-german-cased''': 5_12,
'''distilbert-base-multilingual-cased''': 5_12,
}
A_ = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase( __a ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = DistilBertTokenizer
def __init__( self: int, a_: Union[str, Any]=None, a_: int=None, a_: str=True, a_: Dict="[UNK]", a_: List[str]="[SEP]", a_: Dict="[PAD]", a_: Union[str, Any]="[CLS]", a_: Dict="[MASK]", a_: Optional[Any]=True, a_: List[Any]=None, **a_: Optional[int], ):
'''simple docstring'''
super().__init__(
a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", a_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", a_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", a_ ) != tokenize_chinese_chars
):
_snake_case : Optional[Any] = getattr(a_, normalizer_state.pop("""type""" ) )
_snake_case : List[Any] = do_lower_case
_snake_case : Optional[Any] = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Dict = normalizer_class(**a_ )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self: Tuple, a_: Union[str, Any], a_: str=None ):
'''simple docstring'''
_snake_case : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self: Optional[int], a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
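# --- Illustrative sketch (not part of the original file) ---------------------
# The token_type_ids rule implemented above, in plain form: [CLS] A [SEP] is
# segment 0, an optional B [SEP] is segment 1. Token ids below are placeholders.
def token_type_ids(token_ids_0, token_ids_1=None, cls_id=101, sep_id=102):
    first = [cls_id] + token_ids_0 + [sep_id]
    if token_ids_1 is None:
        return [0] * len(first)
    return [0] * len(first) + [1] * (len(token_ids_1) + 1)

assert token_type_ids([7, 8]) == [0, 0, 0, 0]
assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]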
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
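# --- Illustrative sketch (not part of the original file) ---------------------
# A hypothetical concrete command following the abstract interface above:
# register_subcommand wires the CLI arguments, run executes the command.
from argparse import ArgumentParser, Namespace

class HelloCommand:
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        sub = parser.add_subparsers().add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=lambda args: HelloCommand(args).run())

    def __init__(self, args: Namespace):
        self._name = args.name

    def run(self):
        print(f"hello {self._name}")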
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : list[int | str] ):
"""simple docstring"""
create_state_space_tree(snake_case__ , [] , 0 , [0 for i in range(len(snake_case__ ) )] )
def UpperCAmelCase__ (snake_case__ : list[int | str] , snake_case__ : list[int | str] , snake_case__ : int , snake_case__ : list[int] , ):
"""simple docstring"""
if index == len(snake_case__ ):
print(snake_case__ )
return
for i in range(len(snake_case__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_snake_case : Optional[Any] = True
create_state_space_tree(snake_case__ , snake_case__ , index + 1 , snake_case__ )
current_sequence.pop()
_snake_case : Optional[int] = False
A_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
A_ = ['''A''', '''B''', '''C''']
generate_all_permutations(sequence_a)
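# --- Illustrative sketch (not part of the original file) ---------------------
# The same backtracking search as above, but returning the permutations
# instead of printing them, which makes the append/recurse/pop cycle testable:
def permutations_of(seq):
    out, current, used = [], [], [False] * len(seq)

    def backtrack():
        if len(current) == len(seq):
            out.append(current.copy())
            return
        for i, item in enumerate(seq):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack()
                current.pop()
                used[i] = False

    backtrack()
    return out

assert len(permutations_of([3, 1, 2, 4])) == 24  # 4! orderings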
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : int = vocab_size
_snake_case : int = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Tuple = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : Optional[Any] = rotary_value
_snake_case : List[str] = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
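# --- Illustrative sketch (not part of the original file) ---------------------
# How the OnnxConfig inputs above are typically consumed: the axis mapping
# marks which tensor dimensions may vary at export time. The export call is
# shown commented out because it needs a real model and sample inputs.
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
)
# torch.onnx.export(model, sample_inputs, "roformer.onnx", dynamic_axes=dict(onnx_inputs))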
| 28 | 0 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def UpperCAmelCase__ (snake_case__ : Callable ):
"""simple docstring"""
@wraps(snake_case__ )
def _inner_fn(*snake_case__ : Any , **snake_case__ : Tuple ):
warnings.warn(
(F"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , snake_case__ , )
return fn(*snake_case__ , **snake_case__ )
return _inner_fn
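# --- Illustrative usage (not part of the original file) -----------------------
# Self-contained demo of the decorator pattern above (re-implemented locally,
# since the original function name is mangled in this dump):
import warnings
from functools import wraps

def experimental(fn):
    @wraps(fn)
    def _inner(*args, **kwargs):
        warnings.warn(f"'{fn.__name__}' is experimental.", UserWarning)
        return fn(*args, **kwargs)
    return _inner

@experimental
def unstable_add(a, b):
    return a + b

assert unstable_add(1, 2) == 3  # also emits a UserWarning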
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for old_item in old_list:
_snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
_snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
_snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
_snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
_snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
_snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ):
"""simple docstring"""
_snake_case : Dict = []
for old_item in old_list:
_snake_case : Dict = old_item
_snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
_snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case : Union[str, Any] = old_checkpoint[path]
_snake_case : Optional[int] = old_tensor.shape[0] // 3
_snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 )
_snake_case : Union[str, Any] = query.reshape(snake_case__ )
_snake_case : Tuple = key.reshape(snake_case__ )
_snake_case : Any = value.reshape(snake_case__ )
for path in paths:
_snake_case : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case : Optional[Any] = old_checkpoint[path["""old"""]]
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
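# --- Illustrative sketch (not part of the original file) ---------------------
# The heart of the conversion above is systematic key renaming over a state
# dict. The toy keys and rules below are illustrative only:
toy_checkpoint = {
    "input_blocks.1.0.in_layers.0.weight": "w0",
    "input_blocks.1.0.emb_layers.1.bias": "b0",
}
rules = [("in_layers.0", "norm1"), ("emb_layers.1", "time_emb_proj")]
converted = {}
for old_key, tensor in toy_checkpoint.items():
    new_key = old_key
    for old, new in rules:
        new_key = new_key.replace(old, new)
    converted[new_key] = tensor
assert "input_blocks.1.0.norm1.weight" in converted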
| 28 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowercase( __a ):
'''simple docstring'''
def __init__( self: int, a_: int, a_: int, a_: Any=1_024, a_: List[Any]=1_024, a_: Optional[Any]=3.6 ):
'''simple docstring'''
_snake_case : Union[str, Any] = tokenizer
_snake_case : int = tokenizer.bos_token_id
_snake_case : Any = dataset
_snake_case : str = seq_length
_snake_case : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = iter(self.dataset )
_snake_case : Optional[int] = True
while more_examples:
            _snake_case , _snake_case : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(a_ )["""content"""] )
buffer_len += len(buffer[-1] )
except StopIteration:
_snake_case : Any = False
break
_snake_case : List[str] = tokenizer(a_, truncation=a_ )["""input_ids"""]
_snake_case : Any = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0, len(a_ ), self.seq_length ):
_snake_case : Union[str, Any] = all_token_ids[i : i + self.seq_length]
if len(a_ ) == self.seq_length:
yield torch.tensor(a_ )
def UpperCAmelCase__ (snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Tuple = {"""streaming""": True}
_snake_case : Dict = load_dataset(args.dataset_name , split="""train""" , **snake_case__ )
_snake_case : Union[str, Any] = ConstantLengthDataset(snake_case__ , snake_case__ , seq_length=args.seq_length )
_snake_case : str = DataLoader(snake_case__ , batch_size=args.batch_size )
return eval_dataloader
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
model.eval()
_snake_case : int = []
for step, batch in enumerate(snake_case__ ):
with torch.no_grad():
_snake_case : int = model(snake_case__ , labels=snake_case__ )
_snake_case : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(snake_case__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_snake_case : List[str] = torch.mean(torch.cat(snake_case__ ) )
try:
_snake_case : Tuple = torch.exp(snake_case__ )
except OverflowError:
_snake_case : Dict = float("""inf""" )
return loss.item(), perplexity.item()
# Setup Accelerator
A_ = Accelerator()
# Parse configuration
A_ = HfArgumentParser(EvaluationArguments)
A_ = parser.parse_args()
set_seed(args.seed)
# Logging
A_ = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
A_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
A_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
A_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
A_ , A_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
A_ , A_ = evaluate(args)
logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
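# --- Illustrative sketch (not part of the original file) ---------------------
# The perplexity reported above is just exp(mean cross-entropy loss); a tiny
# numeric check with made-up per-batch losses:
import torch

losses = torch.tensor([2.0, 2.2, 1.8])
perplexity = torch.exp(losses.mean())  # mean loss is 2.0 -> ppl = e^2 ≈ 7.389
assert abs(perplexity.item() - 7.389) < 1e-2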
| 701 |
"""simple docstring"""
from typing import Any
def UpperCAmelCase__ (snake_case__ : list ):
"""simple docstring"""
if not input_list:
return []
_snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list]
_snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
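# --- Illustrative sketch (not part of the original file) ---------------------
# Equivalent mode computation via collections.Counter, useful for
# cross-checking the count-based implementation above:
from collections import Counter

def modes(values):
    if not values:
        return []
    counts = Counter(values)
    top = max(counts.values())
    return sorted(v for v, c in counts.items() if c == top)

assert modes([1, 2, 2, 3, 3]) == [2, 3]
assert modes([]) == []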
| 28 | 0 |
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
A_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Optional[Any], *a_: str, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, **a_: str ):
'''simple docstring'''
super().__init__(*a_, **a_ )
_snake_case : int = eval_examples
_snake_case : Optional[int] = post_process_function
_snake_case : Dict = quant_trainer_args
_snake_case : Optional[int] = 128 # default number of calibration samples
def UpperCamelCase_ ( self: Tuple, a_: Any=None ):
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
_snake_case : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset
_snake_case : List[Any] = self._remove_unused_columns(a_, description="""Calibration""" )
return DataLoader(
a_, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=a_, )
def UpperCamelCase_ ( self: List[str], a_: Any=None ):
'''simple docstring'''
_snake_case : List[Any] = self.train_dataset if calib_dataset is None else calib_dataset
_snake_case : Optional[int] = self.get_calib_dataloader(a_ )
_snake_case : int = self.model
quant_trainer.configure_model(a_, self.quant_trainer_args, calib=a_ )
model.eval()
quant_trainer.enable_calibration(a_ )
logger.info("""***** Running calibration *****""" )
logger.info(f" Num examples = {self.calib_num}" )
logger.info(f" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(a_ ):
# Prediction step
_snake_case : str = self.prediction_step(a_, a_, prediction_loss_only=a_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(a_, self.quant_trainer_args )
_snake_case : str = model
def UpperCamelCase_ ( self: List[Any], a_: Optional[int]=None, a_: str=None, a_: Optional[int]=None, a_: str = "eval" ):
'''simple docstring'''
_snake_case : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
_snake_case : int = self.get_eval_dataloader(a_ )
_snake_case : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_snake_case : int = self.compute_metrics
_snake_case : str = None
_snake_case : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_snake_case : str = eval_loop(
a_, description="""Evaluation""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=a_, )
finally:
_snake_case : Any = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_snake_case : Any = self.post_process_function(a_, a_, output.predictions )
_snake_case : Union[str, Any] = self.compute_metrics(a_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
_snake_case : Dict = metrics.pop(a_ )
self.log(a_ )
else:
_snake_case : Union[str, Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_snake_case : List[Any] = self.callback_handler.on_evaluate(self.args, self.state, self.control, a_ )
return metrics
def UpperCamelCase_ ( self: Optional[Any], a_: int, a_: int, a_: Optional[Any]=None, a_: str = "test" ):
'''simple docstring'''
_snake_case : Dict = self.get_test_dataloader(a_ )
# Temporarily disable metric computation, we will do it in the loop here.
_snake_case : Dict = self.compute_metrics
_snake_case : Optional[int] = None
_snake_case : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_snake_case : List[str] = eval_loop(
a_, description="""Prediction""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=a_, )
finally:
_snake_case : List[str] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_snake_case : List[Any] = self.post_process_function(a_, a_, output.predictions, """predict""" )
_snake_case : int = self.compute_metrics(a_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
_snake_case : List[str] = metrics.pop(a_ )
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=a_ )
def UpperCamelCase_ ( self: Optional[int], a_: int="./" ):
'''simple docstring'''
_snake_case : Optional[int] = self.eval_dataset
_snake_case : Union[str, Any] = self.get_eval_dataloader(a_ )
_snake_case : str = next(iter(a_ ) )
# saving device - to make it consistent
_snake_case : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_snake_case : List[Any] = tuple(v.to(a_ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_snake_case : Dict = True
_snake_case : str = self.model.to(a_ )
model.eval()
model.float()
_snake_case : List[Any] = model.module if hasattr(a_, """module""" ) else model
quant_trainer.configure_model(a_, self.quant_trainer_args )
_snake_case : Dict = os.path.join(a_, """model.onnx""" )
logger.info(f"exporting model to {output_model_file}" )
_snake_case : List[Any] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
a_, a_, a_, export_params=a_, opset_version=13, do_constant_folding=a_, input_names=["""input_ids""", """attention_mask""", """token_type_ids"""], output_names=["""output_start_logits""", """output_end_logits"""], dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
}, verbose=a_, )
logger.info("""onnx export finished""" )
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_vision_model"
def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = patch_size
_snake_case : Dict = image_size
_snake_case : Optional[Any] = initializer_factor
_snake_case : Any = layer_norm_eps
_snake_case : int = stop_gradient
_snake_case : Any = share_layernorm
_snake_case : List[Any] = remove_last_layer
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
            _snake_case : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_text_model"
def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : List[Any] = initializer_factor
_snake_case : Optional[int] = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[str] = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[Any] = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Dict = use_cache
_snake_case : int = pad_token_id
_snake_case : Union[str, Any] = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
@classmethod
def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower"
def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ):
'''simple docstring'''
_snake_case : str = kwargs.pop("""text_config_dict""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ )
super().__init__(**a_ )
_snake_case : str = share_cross_modal_transformer_layers
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = hidden_size
_snake_case : Union[str, Any] = initializer_factor
_snake_case : Dict = layer_norm_eps
_snake_case : Dict = share_link_tower_layers
_snake_case : Optional[int] = link_tower_type
_snake_case : Any = num_attention_heads
_snake_case : int = num_hidden_layers
_snake_case : int = tie_word_embeddings
_snake_case : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
_snake_case : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_snake_case : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_snake_case : Any = BridgeTowerTextConfig(**a_ )
_snake_case : List[Any] = BridgeTowerVisionConfig(**a_ )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : str = self.text_config.to_dict()
_snake_case : List[str] = self.vision_config.to_dict()
_snake_case : Tuple = self.__class__.model_type
return output
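# --- Illustrative sketch (not part of the original file) ---------------------
# The to_dict() pattern above, reduced to its essence: a composite config
# serializes its sub-configs recursively and records its own model_type.
import copy

class ToySubConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class ToyCompositeConfig:
    model_type = "toy_composite"

    def __init__(self):
        self.text_config = ToySubConfig()
        self.vision_config = ToySubConfig(1_024)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

assert ToyCompositeConfig().to_dict()["vision_config"]["hidden_size"] == 1_024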
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = data
_snake_case : List[Any] = None
_snake_case : int = None
def UpperCAmelCase__ ():
print("""\n********Press N to stop entering at any point of time********\n""" )
_snake_case : Union[str, Any] = input("""Enter the value of the root node: """ ).strip().lower()
_snake_case : queue.Queue = queue.Queue()
_snake_case : List[str] = TreeNode(int(snake_case__ ) )
q.put(snake_case__ )
while not q.empty():
_snake_case : Union[str, Any] = q.get()
_snake_case : str = F"Enter the left node of {node_found.data}: "
_snake_case : int = input(snake_case__ ).strip().lower() or """n"""
if check == "n":
return tree_node
_snake_case : Optional[Any] = TreeNode(int(snake_case__ ) )
_snake_case : List[Any] = left_node
q.put(snake_case__ )
_snake_case : List[str] = F"Enter the right node of {node_found.data}: "
_snake_case : Optional[Any] = input(snake_case__ ).strip().lower() or """n"""
if check == "n":
return tree_node
_snake_case : List[Any] = TreeNode(int(snake_case__ ) )
_snake_case : Union[str, Any] = right_node
q.put(snake_case__ )
raise
def UpperCAmelCase__ (snake_case__ : TreeNode ):
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def UpperCAmelCase__ (snake_case__ : TreeNode ):
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def UpperCAmelCase__ (snake_case__ : TreeNode ):
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def UpperCAmelCase__ (snake_case__ : TreeNode ):
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
_snake_case : queue.Queue = queue.Queue()
q.put(snake_case__ )
while not q.empty():
_snake_case : int = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def UpperCAmelCase__ (snake_case__ : TreeNode ):
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
_snake_case : queue.Queue = queue.Queue()
q.put(snake_case__ )
while not q.empty():
_snake_case : Any = []
while not q.empty():
_snake_case : Dict = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case__ )
def UpperCAmelCase__ (snake_case__ : TreeNode ):
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
_snake_case : list[TreeNode] = []
_snake_case : List[str] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(snake_case__ )
_snake_case : List[Any] = n.left
# end of while means current node doesn't have left child
_snake_case : Optional[int] = stack.pop()
# start to traverse its right child
_snake_case : Optional[int] = n.right
def UpperCAmelCase__ (snake_case__ : TreeNode ):
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
_snake_case : list[TreeNode] = []
_snake_case : Dict = node
while n or stack:
while n:
stack.append(snake_case__ )
_snake_case : Tuple = n.left
_snake_case : List[str] = stack.pop()
print(n.data , end=""",""" )
_snake_case : Dict = n.right
def UpperCAmelCase__ (snake_case__ : TreeNode ):
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
_snake_case , _snake_case : Union[str, Any] = [], []
_snake_case : List[Any] = node
stacka.append(snake_case__ )
while stacka: # to find the reversed order of post order, store it in stack2
_snake_case : List[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(snake_case__ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def UpperCAmelCase__ (snake_case__ : str = "" , snake_case__ : List[str]=50 , snake_case__ : str="*" ):
if not s:
return "\n" + width * char
_snake_case , _snake_case : Any = divmod(width - len(snake_case__ ) - 2 , 2 )
return F"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
A_ = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 703 |
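The traversal sample above drives tree construction through interactive `input()`. For reference, a self-contained sketch of the same queue-based level-order (BFS) idea on a hard-coded tree, with the obfuscated names replaced by illustrative ones:

```python
import queue


class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def level_order(root):
    """Breadth-first traversal: visit nodes level by level via a FIFO queue."""
    if root is None:
        return []
    order, q = [], queue.Queue()
    q.put(root)
    while not q.empty():
        node = q.get()
        order.append(node.data)
        if node.left:
            q.put(node.left)
        if node.right:
            q.put(node.right)
    return order


root = Node(1)
root.left, root.right = Node(2), Node(3)
root.left.left = Node(4)
print(level_order(root))  # [1, 2, 3, 4]
```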
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" )
return image
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
_snake_case : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Optional[int] = val
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
_snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
_snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) )
_snake_case : Dict = qkv_bias
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24
_snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "opt-6.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "t5-xl" in model_name:
_snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ )
return config, image_size
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 0 |
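The conversion script above follows a common checkpoint-porting pattern: build a list of `(src, dest)` key pairs, then pop each tensor out of the original state dict and reinsert it under the new name. A dependency-free sketch of that mechanism (the toy list values stand in for real tensors; the key names are taken from the sample's rename table):

```python
def rename_key(state_dict: dict, src: str, dest: str) -> None:
    """Move a value from key `src` to key `dest`, in place."""
    state_dict[dest] = state_dict.pop(src)


# Toy stand-in for a real checkpoint's state dict.
sd = {"visual_encoder.cls_token": [0.0] * 8, "ln_vision.weight": [1.0] * 8}
rename_keys = [
    ("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"),
    ("ln_vision.weight", "vision_model.post_layernorm.weight"),
]
for src, dest in rename_keys:
    rename_key(sd, src, dest)

assert "ln_vision.weight" not in sd
assert "vision_model.post_layernorm.weight" in sd
```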
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''spiece.model'''}
A_ = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
A_ = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self: Dict, a_: str, a_: Union[str, Any]=False, a_: Any=False, a_: Tuple=False, a_: Tuple=None, a_: str=None, a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Dict[str, Any]] = None, **a_: Union[str, Any], ):
'''simple docstring'''
_snake_case : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_snake_case : Tuple = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
_snake_case : str = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_snake_case : Tuple = """<|endoftext|>""" if eos_token is None else eos_token
_snake_case : str = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_snake_case : List[Any] = unk_token if pad_token is None else pad_token
_snake_case : Union[str, Any] = eos_token if bos_token is None else bos_token
else:
_snake_case : str = """<pad>""" if pad_token is None else pad_token
_snake_case : List[str] = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=a_, remove_space=a_, keep_accents=a_, bos_token=a_, eos_token=a_, unk_token=a_, pad_token=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, )
_snake_case : Dict = do_lower_case
_snake_case : Any = remove_space
_snake_case : Optional[int] = keep_accents
_snake_case : Optional[int] = vocab_file
_snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
# Used for whitespace normalization in input texts
# fmt: off
_snake_case : str = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_snake_case : Union[str, Any] = re.compile(
f"[{''.join(map(a_, list(range(0, 9 ) ) + list(range(11, 32 ) ) + list(range(127, 160 ) ) + [160, 173, 8_203] ) )}]" )
def __getstate__( self: Any ):
'''simple docstring'''
_snake_case : Tuple = self.__dict__.copy()
_snake_case : int = None
return state
def __setstate__( self: Any, a_: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = d
# for backward compatibility
if not hasattr(self, """sp_model_kwargs""" ):
_snake_case : Union[str, Any] = {}
_snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return len(self.sp_model )
def UpperCamelCase_ ( self: Dict, a_: str ):
'''simple docstring'''
_snake_case : Dict = self.non_printing_characters_re.sub("""""", a_ )
# Normalize whitespaces
_snake_case : Any = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
_snake_case : Optional[Any] = unicodedata.normalize("""NFC""", a_ )
return text
def UpperCamelCase_ ( self: str, a_: str, **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = self.preprocess_text(a_ )
return self.sp_model.encode(a_, out_type=a_ )
def UpperCamelCase_ ( self: List[Any], a_: str ):
'''simple docstring'''
return self.sp_model.PieceToId(a_ )
def UpperCamelCase_ ( self: Tuple, a_: int ):
'''simple docstring'''
return self.sp_model.IdToPiece(a_ )
@staticmethod
def UpperCamelCase_ ( a_: str ):
'''simple docstring'''
return out_string
def UpperCamelCase_ ( self: Tuple, a_: List[str] ):
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[Any] = """"""
_snake_case : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a_ ) + token
_snake_case : int = True
_snake_case : int = []
else:
current_sub_tokens.append(a_ )
_snake_case : Union[str, Any] = False
out_string += self.sp_model.decode(a_ )
return out_string
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_snake_case : Optional[Any] = os.path.join(
a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_, """wb""" ) as fi:
_snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
def UpperCamelCase_ ( self: Any, a_: Union[str, List[str]], a_: Union[str, bool] = False ):
'''simple docstring'''
if isinstance(a_, a_ ):
_snake_case : str = self.preprocess_text(a_ )
_snake_case : Optional[Any] = self.sp_model.encode(a_ )
else:
_snake_case : int = [self.preprocess_text(a_ ) for t in text]
_snake_case : List[Any] = self.sp_model.encode(a_ )
if return_tensors is True or return_tensors == "pt":
_snake_case : List[str] = torch.tensor(a_ )
return token_ids
def UpperCamelCase_ ( self: Optional[int], a_: Union[int, List[int]] ):
'''simple docstring'''
return self.sp_model.decode(a_ )
def UpperCamelCase_ ( self: Tuple, a_: "Conversation" ):
'''simple docstring'''
_snake_case : Union[str, Any] = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
_snake_case : List[str] = (
f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(a_ ) + f"{self.bos_token}Bot:"
)
return self.encode(text=a_ )
| 704 |
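The tokenizer above normalizes raw text in three steps before SentencePiece encoding: strip non-printing characters with a compiled regex, map exotic whitespace code points to plain spaces, and apply NFC Unicode normalization. A self-contained sketch of that pipeline; the character sets below are small illustrative subsets, not the tokenizer's exact ones:

```python
import re
import unicodedata

# Illustrative subsets; the real tokenizer builds these from explicit code-point ranges.
NON_PRINTING = re.compile(f"[{''.join(map(chr, list(range(0, 9)) + [11, 127, 0x200B]))}]")
WHITESPACES = {"\u00a0", "\u2009", "\u202f", "\u3000"}


def preprocess_text(text: str) -> str:
    text = NON_PRINTING.sub("", text)  # 1. drop control / zero-width characters
    text = "".join(" " if ch in WHITESPACES else ch for ch in text)  # 2. normalize spaces
    return unicodedata.normalize("NFC", text)  # 3. canonical composition


# NBSP becomes a plain space, and e + combining acute composes to é.
print(preprocess_text("cafe\u0301\u00a0au lait"))  # 'café au lait'
```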
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case__ )
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
# remove and rename some keys of the loaded original model
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task the MobileViTV2 model you\'d like to convert was trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 0 |
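The loader above flattens a nested YAML config into dotted keys and attaches them to an `argparse.Namespace`, which is why later reads go through `getattr(config, "model.classification.name", ...)` rather than attribute syntax. A runnable sketch of that flatten-and-attach pattern:

```python
import argparse
import collections.abc


def flatten_as_dict(d, parent_key: str = "", sep: str = ".") -> dict:
    """Flatten a nested mapping into single-level dotted keys."""
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_as_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


nested = {"model": {"classification": {"name": "mobilevit_v2", "mitv2": {"width_multiplier": 1.0}}}}
cfg = argparse.Namespace()
for k, v in flatten_as_dict(nested).items():
    setattr(cfg, k, v)  # dotted attribute names are only reachable via getattr()
assert getattr(cfg, "model.classification.name") == "mobilevit_v2"
```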
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
A_ = False
class lowercase( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""", torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_snake_case : Tuple = torch.manual_seed(0 )
_snake_case : Dict = pipe.dual_guided(
prompt="""first prompt""", image=a_, text_to_image_strength=0.75, generator=a_, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""", ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_snake_case : str = VersatileDiffusionPipeline.from_pretrained(a_, torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : int = generator.manual_seed(0 )
_snake_case : int = pipe.dual_guided(
prompt="""first prompt""", image=a_, text_to_image_strength=0.75, generator=a_, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""", ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""", torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Optional[Any] = """cyberpunk 2077"""
_snake_case : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_snake_case : str = torch.manual_seed(0 )
_snake_case : Tuple = pipe.dual_guided(
prompt=a_, image=a_, text_to_image_strength=0.75, generator=a_, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""", ).images
_snake_case : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : Dict = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_snake_case : Optional[int] = """A painting of a squirrel eating a burger """
_snake_case : Tuple = torch.manual_seed(0 )
_snake_case : Dict = pipe.text_to_image(
prompt=a_, generator=a_, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""" ).images
_snake_case : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : str = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_snake_case : List[Any] = pipe.image_variation(a_, generator=a_, output_type="""numpy""" ).images
_snake_case : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : Any = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 705 |
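The first test above checks determinism across save/reload: run with a seeded generator, serialize, reload, rerun with the same seed, and require near-identical outputs. A lightweight sketch of the same check using a plain `torch.nn.Linear` instead of a diffusion pipeline:

```python
import os
import tempfile

import torch


def run(model: torch.nn.Module, seed: int) -> torch.Tensor:
    """Evaluate the model on input drawn from a freshly seeded generator."""
    gen = torch.Generator().manual_seed(seed)
    x = torch.randn(1, 4, generator=gen)
    with torch.no_grad():
        return model(x)


model = torch.nn.Linear(4, 2)
before = run(model, seed=0)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "model.pt")
    torch.save(model.state_dict(), path)
    reloaded = torch.nn.Linear(4, 2)
    reloaded.load_state_dict(torch.load(path))
after = run(reloaded, seed=0)
assert torch.allclose(before, after)  # same seed + same weights -> identical output
```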
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a_, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a_, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case : Union[str, Any] = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a_, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" )
self.assertEqual(a_, """\nCONSTANT = None\n""" )
_snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" )
self.assertEqual(
a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""], a_ )
| 28 | 0 |
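The test above exercises dummy-object generation: placeholder classes that raise a helpful error when an optional backend is missing. A simplified take on that pattern, using a metaclass to intercept instantiation (the real implementation routes through a `requires_backends` helper and also guards classmethods):

```python
class DummyObject(type):
    """Metaclass for placeholders: any instantiation reports the missing backend."""

    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the following backends: {cls._backends}")


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]


try:
    FakeClass()
except ImportError as err:
    print(err)  # FakeClass requires the following backends: ['torch']
```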
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for old_item in old_list:
_snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
_snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
_snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
_snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
_snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
_snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ):
"""simple docstring"""
_snake_case : Dict = []
for old_item in old_list:
_snake_case : Dict = old_item
_snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
_snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case : Union[str, Any] = old_checkpoint[path]
_snake_case : Optional[int] = old_tensor.shape[0] // 3
_snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 )
_snake_case : Union[str, Any] = query.reshape(snake_case__ )
_snake_case : Tuple = key.reshape(snake_case__ )
_snake_case : Any = value.reshape(snake_case__ )
for path in paths:
_snake_case : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case : Optional[Any] = old_checkpoint[path["""old"""]]
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 706 |
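The trickiest step in the conversion above is splitting a fused `qkv` projection into separate query/key/value tensors: reshape head-major, split along the per-head axis, then flatten back. A small sketch of that reshape/split, assuming (as the script does) that the fused tensor is laid out head-major, i.e. rows ordered `[h0_q | h0_k | h0_v | h1_q | ...]`:

```python
import torch

num_heads, head_dim = 2, 4
channels = num_heads * head_dim  # width of each of q, k, v
qkv = torch.randn(3 * channels, channels)  # fused projection weight

# Group rows head by head so each head's q, k, v stay contiguous, then split.
per_head = qkv.reshape(num_heads, 3 * channels // num_heads, channels)
q, k, v = per_head.split(channels // num_heads, dim=1)  # each (num_heads, head_dim, channels)
q, k, v = (t.reshape(-1, channels) for t in (q, k, v))
assert q.shape == k.shape == v.shape == (channels, channels)
```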
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 28 | 0 |
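The module above registers its public names in `_import_structure` and hands them to `_LazyModule`, so heavy submodules are imported only when one of their attributes is first accessed. A generic, self-contained sketch of that lazy-import idea (this is not transformers' actual `_LazyModule`, just the core mechanism, demonstrated with the standard-library `json` module):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(module_name)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # json is imported only at this point
```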
"""simple docstring"""
from typing import Any
class lowercase:
'''simple docstring'''
def __init__( self: Union[str, Any], a_: Any ):
'''simple docstring'''
_snake_case : int = data
_snake_case : List[str] = None
class lowercase:
'''simple docstring'''
def __init__( self: Dict ):
'''simple docstring'''
_snake_case : str = None
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = self.head
while temp is not None:
print(temp.data, end=""" """ )
_snake_case : List[Any] = temp.next
print()
def UpperCamelCase_ ( self: Optional[int], a_: Any ):
'''simple docstring'''
_snake_case : Dict = Node(a_ )
_snake_case : str = self.head
_snake_case : List[Any] = new_node
def UpperCamelCase_ ( self: Optional[Any], a_: Any, a_: Union[str, Any] ):
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
_snake_case : List[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
_snake_case : Optional[Any] = node_a.next
_snake_case : str = self.head
while node_a is not None and node_a.data != node_data_a:
_snake_case : Union[str, Any] = node_a.next
if node_a is None or node_a is None:
return
_snake_case , _snake_case : Dict = node_a.data, node_a.data
if __name__ == "__main__":
A_ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 707 |
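De-obfuscated, the sample above swaps the payloads of two nodes found by value in a singly linked list; the links themselves never move. A runnable sketch with illustrative names:

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


def swap_data(head, a, b):
    """Swap the payloads of the first nodes holding `a` and `b` (no-op if either is absent)."""
    if a == b:
        return
    node_a = node_b = head
    while node_a is not None and node_a.data != a:
        node_a = node_a.next
    while node_b is not None and node_b.data != b:
        node_b = node_b.next
    if node_a is None or node_b is None:
        return
    node_a.data, node_b.data = node_b.data, node_a.data


head = None
for value in range(5, 0, -1):  # head-insertion builds 1 -> 2 -> 3 -> 4 -> 5
    node = Node(value)
    node.next = head
    head = node

swap_data(head, 1, 4)
out, n = [], head
while n:
    out.append(n.data)
    n = n.next
print(out)  # [4, 2, 3, 1, 5]
```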
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool , use_xla: bool ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
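# Usage sketch for the factory above (assumes a built TF model and inputs):
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
#
# In eager mode the call goes through a plain wrapper instead of `tf.function`,
# which is why combining `eager_mode` with XLA is rejected with a ValueError.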
def random_input_ids(batch_size: int , sequence_length: int , vocab_size: int ):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
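# Note: the `random.Random()` above is unseeded, so benchmark inputs differ from
# run to run; only the (batch_size, sequence_length) shape matters for timing.
# Seeding it, e.g. `random.Random(42)`, would make runs reproducible (sketch).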
class TensorFlowBenchmark( Benchmark ):
    '''simple docstring'''
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
    def framework_version( self ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
                    if not is_py3nvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
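# ---------------------------------------------------------------------------
# The speed measurement above follows the timeit documentation and reports the
# minimum over several repeats rather than the mean. A dependency-free sketch of
# the same pattern (`measure_min_runtime` is illustrative, not part of this API):
def measure_min_runtime(fn, repeat=3, number=10):
    """Return the best observed per-call runtime of `fn`, in seconds."""
    runs = timeit.repeat(fn, repeat=repeat, number=number)
    return min(runs) / number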
| 28 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''SenseTime/deformable-detr''': '''https://huggingface.co/SenseTime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1_024, encoder_layers=6, encoder_ffn_dim=1_024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1_024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        '''simple docstring'''
        return self.d_model
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
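# Usage sketch for the config above (flag dependency enforced in __init__):
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)   # ok
#   DeformableDetrConfig(two_stage=True, with_box_refine=False)           # ValueError
#
# to_dict() also serializes the nested backbone config, so a round trip through
# a plain dict preserves custom backbone settings.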
| 708 |
"""simple docstring"""
def find_minimum_change(denominations: list[int] , value: str ):
    """Greedily pick the largest denominations first and return the coins used."""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
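# Caveat (illustrative): the greedy strategy is optimal for canonical coin
# systems such as the Indian denominations above, but not for arbitrary ones:
#
#   >>> find_minimum_change([1, 3, 4], "6")
#   [4, 1, 1]    # three coins, although 3 + 3 would need only two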
| 28 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
    '''simple docstring'''
    mode = "sequence-classification"
def __init__( self: Union[str, Any], a_: Any ):
'''simple docstring'''
if type(a_ ) == dict:
_snake_case : Dict = Namespace(**a_ )
_snake_case : List[Any] = glue_output_modes[hparams.task]
_snake_case : Optional[Any] = glue_tasks_num_labels[hparams.task]
super().__init__(a_, a_, self.mode )
def UpperCamelCase_ ( self: List[Any], **a_: Optional[Any] ):
'''simple docstring'''
return self.model(**a_ )
def UpperCamelCase_ ( self: Optional[int], a_: List[Any], a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case : Optional[int] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_snake_case : Union[str, Any] = self(**a_ )
_snake_case : List[str] = outputs[0]
_snake_case : Tuple = self.trainer.lr_schedulers[0]["""scheduler"""]
_snake_case : List[Any] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.hparams
_snake_case : List[str] = processors[args.task]()
_snake_case : Optional[Any] = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case : Union[str, Any] = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""", a_ )
else:
logger.info("""Creating features from dataset file at %s""", args.data_dir )
_snake_case : Any = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_snake_case : str = convert_examples_to_features(
a_, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
logger.info("""Saving features into cached file %s""", a_ )
torch.save(a_, a_ )
def UpperCamelCase_ ( self: str, a_: str, a_: int, a_: bool = False ):
'''simple docstring'''
_snake_case : Optional[int] = """dev""" if mode == """test""" else mode
_snake_case : str = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""", a_ )
_snake_case : int = torch.load(a_ )
_snake_case : Optional[int] = torch.tensor([f.input_ids for f in features], dtype=torch.long )
_snake_case : List[str] = torch.tensor([f.attention_mask for f in features], dtype=torch.long )
_snake_case : str = torch.tensor([f.token_type_ids for f in features], dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case : Optional[int] = torch.tensor([f.label for f in features], dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case : List[Any] = torch.tensor([f.label for f in features], dtype=torch.float )
return DataLoader(
TensorDataset(a_, a_, a_, a_ ), batch_size=a_, shuffle=a_, )
def UpperCamelCase_ ( self: Tuple, a_: Dict, a_: List[str] ):
'''simple docstring'''
_snake_case : List[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case : Union[str, Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_snake_case : Optional[int] = self(**a_ )
_snake_case : Union[str, Any] = outputs[:2]
_snake_case : List[str] = logits.detach().cpu().numpy()
_snake_case : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase_ ( self: Tuple, a_: int ):
'''simple docstring'''
_snake_case : Optional[Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_snake_case : Tuple = np.concatenate([x["""pred"""] for x in outputs], axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case : Tuple = np.argmax(a_, axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case : Optional[int] = np.squeeze(a_ )
_snake_case : Optional[Any] = np.concatenate([x["""target"""] for x in outputs], axis=0 )
_snake_case : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case : Tuple = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task, a_, a_ )}
_snake_case : Optional[int] = dict(results.items() )
_snake_case : str = results
return ret, preds_list, out_label_list
def UpperCamelCase_ ( self: str, a_: list ):
'''simple docstring'''
_snake_case : Tuple = self._eval_end(a_ )
_snake_case : int = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase_ ( self: str, a_: Any ):
'''simple docstring'''
_snake_case : str = self._eval_end(a_ )
_snake_case : List[str] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser, root_dir ):
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(parser, root_dir )
        parser.add_argument(
            """--max_seq_length""", default=128, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ), )
        parser.add_argument(
            """--task""", default="""""", type=str, required=True, help="""The GLUE task to run""", )
        parser.add_argument(
            """--gpus""", default=0, type=int, help="""The number of GPUs allocated for this, it is by default 0 meaning none""", )
        parser.add_argument(
            """--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""" )
        return parser
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , F"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
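# Example invocation (a sketch: --task, --gpus, --max_seq_length and
# --overwrite_cache are defined above; the remaining flags come from
# add_generic_args/BaseTransformer in lightning_base and are assumed here):
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results/mrpc \
#       --max_seq_length 128 --gpus 1 --do_train --do_predict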
| 709 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
'''simple docstring'''
    def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        '''simple docstring'''
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
        is_pillow_less_than_9 = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
        if is_pillow_less_than_9:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
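# The integration tests above all follow the same logits-vs-expected-slice
# pattern. A small self-contained helper distilling it (a sketch; assumes torch
# is available, as in the tests above):
def assert_close_slice(actual, expected, atol=1E-4):
    """Mirror the torch.allclose checks above, but report the worst deviation."""
    if not torch.allclose(actual, expected, atol=atol):
        raise AssertionError(
            f"max abs diff {(actual - expected).abs().max().item():.2e} exceeds atol={atol}" )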
| 28 | 0 |
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config ):
"""simple docstring"""
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption(parser ):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    """simple docstring"""
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker( OutputChecker ):
    '''simple docstring'''
    def check_output( self, want, got, optionflags ):
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
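# Usage sketch for the custom flag registered above: appending
# `# doctest: +IGNORE_RESULT` to an example line makes CustomOutputChecker
# accept any output for that line, e.g.
#
#   >>> trainer.train()  # doctest: +IGNORE_RESULT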
| 710 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config( self, **kwargs ):
        '''simple docstring'''
        config = {"""num_train_timesteps""": 1_000}
        config.update(**kwargs )
        return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
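# The save/load equivalence checks above reduce to this round trip (a sketch
# against the diffusers config API; works for any scheduler class exposing
# save_config/from_pretrained):
def roundtrip_scheduler(scheduler_class, **config_kwargs):
    """Serialize a scheduler's config to disk and rebuild the scheduler from it."""
    scheduler = scheduler_class(**config_kwargs)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        return scheduler_class.from_pretrained(tmpdirname)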
| 28 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
'''simple docstring'''
def __init__( self: Tuple, a_: Any, a_: Optional[Any]=13, a_: List[str]=7, a_: str=True, a_: Union[str, Any]=True, a_: Optional[Any]=True, a_: int=True, a_: str=99, a_: List[Any]=32, a_: Optional[Any]=5, a_: int=4, a_: Optional[int]=37, a_: Dict="gelu", a_: List[Any]=0.1, a_: Dict=0.1, a_: List[str]=128, a_: str=32, a_: Optional[int]=16, a_: Optional[Any]=2, a_: int=0.02, a_: Tuple=3, a_: Any=4, a_: Dict=None, ):
'''simple docstring'''
_snake_case : str = parent
_snake_case : Dict = batch_size
_snake_case : List[Any] = seq_length
_snake_case : List[Any] = is_training
_snake_case : Union[str, Any] = use_input_mask
_snake_case : Optional[int] = use_token_type_ids
_snake_case : int = use_labels
_snake_case : Any = vocab_size
_snake_case : Dict = hidden_size
_snake_case : Union[str, Any] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : List[Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Tuple = max_position_embeddings
_snake_case : Union[str, Any] = type_vocab_size
_snake_case : Optional[int] = type_sequence_label_size
_snake_case : Optional[Any] = initializer_range
_snake_case : List[str] = num_labels
_snake_case : List[Any] = num_choices
_snake_case : Optional[int] = scope
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_snake_case : List[str] = None
if self.use_input_mask:
_snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : List[str] = None
if self.use_token_type_ids:
_snake_case : int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
_snake_case : Optional[int] = None
_snake_case : int = None
_snake_case : Dict = None
if self.use_labels:
_snake_case : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
_snake_case : Tuple = ids_tensor([self.batch_size], self.num_choices )
_snake_case : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=a_, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
(
_snake_case
) : Union[str, Any] = self.prepare_config_and_inputs()
_snake_case : int = True
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_snake_case : int = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self: Tuple, a_: str, a_: Optional[int], a_: int, a_: Dict, a_: int, a_: Any, a_: str ):
'''simple docstring'''
_snake_case : Dict = NezhaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_, attention_mask=a_, token_type_ids=a_ )
_snake_case : Union[str, Any] = model(a_, token_type_ids=a_ )
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self: List[str], a_: Optional[Any], a_: List[str], a_: str, a_: Any, a_: Any, a_: Tuple, a_: Any, a_: Tuple, a_: Any, ):
'''simple docstring'''
_snake_case : str = True
_snake_case : Dict = NezhaModel(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[Any] = model(
a_, attention_mask=a_, token_type_ids=a_, encoder_hidden_states=a_, encoder_attention_mask=a_, )
_snake_case : Dict = model(
a_, attention_mask=a_, token_type_ids=a_, encoder_hidden_states=a_, )
_snake_case : Tuple = model(a_, attention_mask=a_, token_type_ids=a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self: Optional[Any], a_: Union[str, Any], a_: int, a_: Optional[Any], a_: str, a_: Dict, a_: Optional[int], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = NezhaForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self: Tuple, a_: int, a_: Optional[Any], a_: Any, a_: int, a_: Union[str, Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = NezhaForNextSentencePrediction(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(
a_, attention_mask=a_, token_type_ids=a_, labels=a_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Union[str, Any], a_: List[str], a_: Optional[Any], a_: int, a_: int, a_: Dict ):
'''simple docstring'''
_snake_case : Tuple = NezhaForPreTraining(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(
a_, attention_mask=a_, token_type_ids=a_, labels=a_, next_sentence_label=a_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any], a_: int, a_: List[Any], a_: List[str], a_: str, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = NezhaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(
a_, attention_mask=a_, token_type_ids=a_, start_positions=a_, end_positions=a_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self: int, a_: Union[str, Any], a_: str, a_: Optional[Any], a_: str, a_: List[str], a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : Any = self.num_labels
_snake_case : Union[str, Any] = NezhaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Dict, a_: Any, a_: Dict, a_: Tuple, a_: List[Any], a_: int, a_: str, a_: Dict ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : List[Any] = NezhaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self: Any, a_: Dict, a_: Union[str, Any], a_: str, a_: int, a_: int, a_: Tuple, a_: Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.num_choices
_snake_case : Optional[Any] = NezhaForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
_snake_case : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
_snake_case : List[str] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
_snake_case : int = model(
a_, attention_mask=a_, token_type_ids=a_, labels=a_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowercase( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
    def _prepare_for_class( self: Union[str, Any], inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : int = NezhaModelTester(self )
_snake_case : List[Any] = ConfigTester(self, config_class=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[Any] = NezhaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict, model_class )
            traced_model = torch.jit.trace(
                model, (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, """bert.pt""" ) )
                loaded = torch.jit.load(os.path.join(tmp, """bert.pt""" ), map_location=torch_device )
                loaded(inputs_dict["""input_ids"""].to(torch_device ), inputs_dict["""attention_mask"""].to(torch_device ) )
@require_torch
class lowercase( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
        model = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21_128) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ) )
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime (number: int ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_00_01, 2 ) if not is_prime(num )]


def compute_nums (n: int ):
    """simple docstring"""
    if not isinstance(n, int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )

    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(list_nums ) == n:
                return list_nums
    return []
def solution ():
    """simple docstring"""
    return compute_nums(1 )[0]
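# Illustrative check (added; not part of the original snippet). For the first odd
# composite, 9, we have 9 - 2 * 1**2 = 7, which is prime, so 9 *can* be written as
# a prime plus twice a square and is not a counterexample:
assert is_prime(9 - 2 * 1**2)
# The widely cited answer to this Project Euler problem is 5777, the smallest odd
# composite admitting no such decomposition:
assert compute_nums(1) == [5777]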
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = MgpstrTokenizer
lowercase__ = False
lowercase__ = {}
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
super().setUp()
# fmt: off
    vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
def UpperCamelCase_ ( self: Optional[Any], **a_: int ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: str, a_: Dict ):
'''simple docstring'''
_snake_case : Dict = """tester"""
_snake_case : List[str] = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                special_token = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ), 1 )
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                input_text, output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text, add_special_tokens=False )
                self.assertListEqual(ids, ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ), 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a, str )
                self.assertEqual(text_a.replace(""" """, """""" ), output_text )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
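# Illustrative note (added): MGP-STR tokenizes one character per token over its
# 38-symbol vocabulary, so encoding "tester" yields six ids, one per letter; the
# checkpoint path below is a placeholder.
#
#   tokenizer = MgpstrTokenizer.from_pretrained("path/to/checkpoint")
#   tokenizer("tester")["input_ids"]  # -> six character-level ids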
| 712 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
    def __init__( self: List[Any], device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14" ):
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )
    def preprocess_img( self: List[str], images ):
        '''simple docstring'''
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self: Any, text=None, images=None, **kwargs ):
        '''simple docstring'''
        encoding = self.tokenizer(text=text, **kwargs )
        encoding["""pixel_values"""] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
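# Added note: unlike the stock CLIP processor, the torchvision transforms above
# operate on tensors inside the autograd graph, so gradients from a CLIP loss can
# flow back through the image preprocessing. A minimal usage sketch (assumes the
# tokenizer weights can be downloaded):
#
#   processor = ProcessorGradientFlow(device="cpu")
#   batch = processor(text=["a face"], images=torch.rand(1, 3, 256, 256, requires_grad=True), return_tensors="pt", padding=True)
#   batch["pixel_values"].requires_grad  # True: preprocessing is differentiable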
class lowercase( nn.Module ):
'''simple docstring'''
    def __init__( self: List[Any], iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ):
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation( self: Tuple, input_path=None, output_path=None, total_duration=5, extend_frames=True ):
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = """./animation.gif"""
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + """/*""" ) )
        if not len(paths ):
            raise ValueError(
                """No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
                """ function?)""" )
        if len(paths ) == 1:
            print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(""".png""" ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path, images, duration=durations )
        print(f"gif saved to {output_path}" )
    def _get_latent( self: str, path=None, img=None ):
        '''simple docstring'''
        if not (path or img):
            raise ValueError("""Input either path or tensor""" )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ), target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z, *_ = self.vqgan.encode(img )
        return z
    def _add_vector( self: Union[str, Any], transform_vector ):
        '''simple docstring'''
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self: List[Any], prompts, image, weights=None ):
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="""pt""", padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self: Any, pos_prompts, neg_prompts, image ):
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts["""prompts"""], image, weights=(1 / pos_prompts["""weights"""]) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["""prompts"""], image, weights=neg_prompts["""weights"""] )
        else:
            neg_logits = torch.tensor([1], device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self: Optional[Any], original_img, pos_prompts, neg_prompts ):
        '''simple docstring'''
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device )
        optim = torch.optim.Adam([vector], lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img )
            print("""CLIP loss""", clip_loss )
            if self.log:
                wandb.log({"""CLIP Loss""": clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self: int, positive_prompts, negative_prompts, image_path ):
        '''simple docstring'''
        wandb.init(reinit=True, project="""face-editor""" )
        wandb.config.update({"""Positive Prompts""": positive_prompts} )
        wandb.config.update({"""Negative Prompts""": negative_prompts} )
        wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log("""Original Image""", wandb.Image(image ) )
    def process_prompts( self: str, prompts ):
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str ):
            prompts = [prompt.strip() for prompt in prompts.split("""|""" )]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(""":""" )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device ),
        }
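    # Illustrative example (added): prompts may be "|"-separated strings with
    # optional ":weight" suffixes, so
    #   self.process_prompts("a happy face:2|sunglasses:-1")
    # returns {"prompts": ["a happy face", "sunglasses"], "weights": tensor([2., -1.])}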
    def generate( self: Dict, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ):
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device )
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + """_""" + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print("""Original Image""" )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
            if self.log:
                wandb.log({"""Image""": wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
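# Hedged end-to-end sketch (added; file paths and prompt text are placeholders, and
# the VQGAN config/checkpoint must exist locally):
#
#   editor = lowercase(iterations=5, lr=0.05, vqgan_config="model.yaml", vqgan_checkpoint="model.ckpt")
#   editor.generate("a smiling face", neg_prompts="blurry", image_path="face.png", show_intermediate=False)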
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
def UpperCAmelCase__ (number: int ):
    """simple docstring"""
    if not isinstance(number, int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )

    count = 0
    while number:
        # This way we arrive at the next set bit (the next 1) instead of looping
        # through every bit and checking for 1s, so the loop runs once per `1` bit
        # rather than 32 times
        number &= number - 1
        count += 1
    return count
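# Worked example (added): Brian Kernighan's trick clears the lowest set bit on each
# iteration, so for 13 == 0b1101 the loop runs three times:
#   0b1101 & 0b1100 -> 0b1100
#   0b1100 & 0b1011 -> 0b1000
#   0b1000 & 0b0111 -> 0b0000
assert UpperCAmelCase__(0b1101) == 3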
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCAmelCase__ (*args , take_from : Optional[Union[Dict, Any]] = None , standard_warn : bool = True , stacklevel : int = 2 ):
    """simple docstring"""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                F" version {__version__} is >= {version_name}" )

        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = F"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )

    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )

    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
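# Hypothetical usage sketch (added; argument names are illustrative, not from the
# original file). Inside a library function, a deprecated keyword can be drained
# from **kwargs like so:
#
#   def resize(image, **kwargs):
#       old_size = UpperCAmelCase__(("old_size", "999.0.0", "Use `size` instead."), take_from=kwargs)
#       ...
#
# The helper warns, pops "old_size" from kwargs, and returns its value; any
# leftover unexpected keyword in `take_from` raises a TypeError.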
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
| 28 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def UpperCAmelCase__ ():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = """__test_patch_submodule_mock__"""
    with patch_submodule(_test_patching , """os.path.join""" , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
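# Added note: patch_submodule must rebind every alias created at import time
# (os, os.path, renamed_os, ...), not just the dotted path itself. A minimal
# sketch of the same idea, assuming a module `mod` that did `import os`:
#
#   with patch_submodule(mod, "os.getcwd", lambda: "/fake"):
#       assert mod.os.getcwd() == "/fake"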
def UpperCAmelCase__ ():
"""simple docstring"""
assert _test_patching.open is open
    mock = """__test_patch_submodule_builtin_mock__"""
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , """open""" , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def UpperCAmelCase__ ():
"""simple docstring"""
    mock = """__test_patch_submodule_missing_mock__"""
    with patch_submodule(_test_patching , """pandas.read_csv""" , mock ):
pass
def UpperCAmelCase__ ():
"""simple docstring"""
    mock = """__test_patch_submodule_missing_builtin_mock__"""
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , """len""" , None ) is None
    with patch_submodule(_test_patching , """len""" , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def UpperCAmelCase__ ():
"""simple docstring"""
    mock = """__test_patch_submodule_start_and_stop_mock__"""
    patch = patch_submodule(_test_patching , """open""" , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def UpperCAmelCase__ ():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = """__test_patch_submodule_successive_join__"""
    mock_dirname = """__test_patch_submodule_successive_dirname__"""
    mock_rename = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , """os.path.join""" , mock_join ):
        with patch_submodule(_test_patching , """os.rename""" , mock_rename ):
            with patch_submodule(_test_patching , """os.path.dirname""" , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , """os.rename""" , mock_rename ):
        with patch_submodule(_test_patching , """os.path.join""" , mock_join ):
            with patch_submodule(_test_patching , """os.path.dirname""" , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def UpperCAmelCase__ ():
"""simple docstring"""
    mock = """__test_patch_submodule_doesnt_exist_mock__"""
    with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , mock ):
        pass
    with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , mock ):
        pass
pass
| 715 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
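# Added note (assumption): capping the Parquet row-group size for image/audio
# features keeps each row group's decoded payload small enough to stream, while
# `None` defers to the writer's default row-group size for plain scalar columns.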
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowercase:
'''simple docstring'''
    def __init__( self: Union[str, Any], initial_capacity: int = 6 ):
        '''simple docstring'''
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self: int, initial_capacity: int ):
        '''simple docstring'''
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self: int ):
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
    def peek( self: Dict ):
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
    def enqueue( self: int, data: Any ):
        '''simple docstring'''
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self: Tuple ):
        '''simple docstring'''
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self: Any ):
        '''simple docstring'''
        if self.is_empty():
            raise Exception("""Empty Queue""" )

    def check_is_full( self: Tuple ):
        '''simple docstring'''
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node:
    '''simple docstring'''
    def __init__( self: Tuple ):
        '''simple docstring'''
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
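# Illustrative usage (added): the queue is a fixed-capacity ring of pre-allocated
# nodes, so enqueueing past the capacity raises "Full Queue":
#
#   queue = lowercase(2)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   queue.dequeue()  # -> "a"
#   queue.dequeue()  # -> "b"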
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
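# Zero out the initializer ranges (including the nested backbone config) so that any parameter
# whose mean is not exactly 0.0 or 1.0 below must come from a hard-coded initialization.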
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
"""Load the ADE20k fixture image used by the slow integration tests below."""
filepath = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
image = Image.open(filepath ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
| 28 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = GPTSanJapaneseTokenizer
lowercase__ = False
lowercase__ = {"do_clean_text": False, "add_prefix_space": False}
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
super().setUp()
# fmt: off
_snake_case : Optional[int] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
_snake_case : Tuple = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
_snake_case : Tuple = {"""unk_token""": """<unk>"""}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file, """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(a_ ) )
def UpperCamelCase_ ( self: Dict, **a_: List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: int, a_: Any ):
'''simple docstring'''
_snake_case : Optional[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
_snake_case : Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCamelCase_ ( self: Any, a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.get_input_output_texts(a_ )
_snake_case : Tuple = tokenizer.encode(a_, add_special_tokens=a_ )
_snake_case : str = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
return text, ids
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.get_tokenizer()
# Testing tokenization
_snake_case : Tuple = """こんにちは、世界。 こんばんは、㔺界。"""
_snake_case : Union[str, Any] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
_snake_case : List[Any] = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids without special tokens
_snake_case : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids with special tokens
_snake_case : Any = tokens + [tokenizer.unk_token]
_snake_case : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_snake_case : List[Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Tuple = self.get_tokenizer()
# Testing tokenization
_snake_case : Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
_snake_case : List[str] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
_snake_case : Optional[Any] = tokenizer.encode(a_ )
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, a_ )
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
_snake_case : Optional[Any] = """こんにちは、世界。"""
_snake_case : List[Any] = """こんばんは、㔺界。😀"""
_snake_case : Tuple = """こんにちは、世界。こんばんは、世界。😀"""
_snake_case : str = tokenizer.encode(prefix_text + input_text )
_snake_case : Optional[Any] = tokenizer.encode("""""", prefix_text=prefix_text + input_text )
_snake_case : Optional[int] = tokenizer.encode(a_, prefix_text=a_ )
_snake_case : List[Any] = tokenizer.decode(a_ )
_snake_case : str = tokenizer.decode(a_ )
_snake_case : Union[str, Any] = tokenizer.decode(a_ )
self.assertEqual(a_, a_ )
self.assertEqual(a_, a_ )
self.assertEqual(a_, a_ )
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
_snake_case : Union[str, Any] = """こんにちは、世界。"""
_snake_case : Optional[Any] = """こんばんは、㔺界。😀"""
_snake_case : Dict = len(tokenizer.encode(a_ ) ) - 2
_snake_case : List[Any] = len(tokenizer.encode(a_ ) ) - 2
_snake_case : Tuple = [1] + [0] * (len_prefix + len_text + 1)
_snake_case : Dict = [1] * (len_prefix + len_text + 1) + [0]
_snake_case : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
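# GPTSAN is a prefix-LM: token_type_ids mark the prefix segment (1, attended bidirectionally)
# versus the text to be generated (0), so all three encodings must agree on the prefix span.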
_snake_case : List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
_snake_case : Optional[int] = tokenizer("""""", prefix_text=prefix_text + input_text ).token_type_ids
_snake_case : Optional[Any] = tokenizer(a_, prefix_text=a_ ).token_type_ids
self.assertListEqual(a_, a_ )
self.assertListEqual(a_, a_ )
self.assertListEqual(a_, a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
_snake_case : int = tokenizer.encode("""あンいワ""" )
_snake_case : Optional[int] = tokenizer.encode("""""", prefix_text="""あンいワ""" )
_snake_case : Union[str, Any] = tokenizer.encode("""いワ""", prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(a_ ), tokenizer.decode(a_ ) )
self.assertEqual(tokenizer.decode(a_ ), tokenizer.decode(a_ ) )
self.assertNotEqual(a_, a_ )
self.assertNotEqual(a_, a_ )
self.assertEqual(x_token_a[1], x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1], x_token_a[3] ) # SEG token
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
_snake_case : List[str] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
_snake_case : Dict = tokenizer(a_, padding=a_ )
_snake_case : List[Any] = tokenizer.batch_encode_plus(a_, padding=a_ )
# fmt: off
_snake_case : Any = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
_snake_case : Union[str, Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_snake_case : Tuple = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids, a_ )
self.assertListEqual(x_token.token_type_ids, a_ )
self.assertListEqual(x_token.attention_mask, a_ )
self.assertListEqual(x_token_a.input_ids, a_ )
self.assertListEqual(x_token_a.token_type_ids, a_ )
self.assertListEqual(x_token_a.attention_mask, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
"""XOR-decrypt the ciphertext with a repeating key; return None as soon as a non-printable byte appears."""
decoded : str = ""
keychar : int
cipherchar : int
decodedchar : int
for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
decodedchar = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(decodedchar )
return decoded
def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
"""Return every decryption, over all three-lowercase-letter keys, that consists only of printable characters."""
possibles: list[str] = []
for key in product(LOWERCASE_INTS , repeat=3 ):
encoded = try_key(ciphertext , key )
if encoded is not None:
possibles.append(encoded )
return possibles
def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
"""Keep only the candidate plaintexts that contain the given common English word (case-insensitive)."""
return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt" ) -> int:
"""Decrypt the cipher file and return the sum of the ASCII values of the plaintext."""
ciphertext: list[int]
possibles: list[str]
common_word: str
decoded_text: str
data: str = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="""utf-8""" )
ciphertext = [int(number ) for number in data.strip().split(""",""" )]
possibles = filter_valid_chars(ciphertext )
for common_word in COMMON_WORDS:
possibles = filter_common_word(possibles , common_word )
if len(possibles ) == 1:
break
decoded_text = possibles[0]
return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
def jaro_winkler(stra: str , strb: str ) -> float:
"""Compute the Jaro-Winkler similarity of two strings (1.0 means identical)."""
def get_matched_characters(_stra: str , _strb: str ) -> str:
matched = []
limit = min(len(_stra ) , len(_strb ) ) // 2
for i, l in enumerate(_stra ):
left = int(max(0 , i - limit ) )
right = int(min(i + limit + 1 , len(_strb ) ) )
if l in _strb[left:right]:
matched.append(l )
_strb = F"{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"
return "".join(matched )
# matching characters
matched_a = get_matched_characters(stra , strb )
matched_b = get_matched_characters(strb , stra )
match_count = len(matched_a )
# transposition
transpositions = (
len([(ca, cb) for ca, cb in zip(matched_a , matched_b ) if ca != cb] ) // 2
)
if not match_count:
jaro = 0.0
else:
jaro = (
1
/ 3
* (
match_count / len(stra )
+ match_count / len(strb )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
prefix_len = 0
for ca, cb in zip(stra[:4] , strb[:4] ):
if ca == cb:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
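# Classic sanity check: jaro_winkler("martha", "marhta") == 0.9611111111111111
# (6 matching characters, 1 transposition, shared 3-letter prefix "mar").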
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "feature_extractor"]
lowercase__ = "TvltImageProcessor"
lowercase__ = "TvltFeatureExtractor"
def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=a_, feature_extractor=a_ )
_snake_case : Any = image_processor
_snake_case : Dict = feature_extractor
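# The processor is a thin dispatcher: images (and optional mixed images) go through the image
# processor, raw audio through the feature extractor, and all outputs are merged into one dict.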
def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_snake_case : Optional[int] = None
if images is not None:
_snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ )
if images_mixed is not None:
_snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ )
if audio is not None:
_snake_case : Any = self.feature_extractor(
a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ )
_snake_case : List[str] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 28 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Optional[Any], a_: Optional[int]=13, a_: Dict=7, a_: Optional[Any]=True, a_: Any=True, a_: Any=True, a_: Any=99, a_: List[str]=32, a_: Any=5, a_: Optional[Any]=4, a_: Dict=37, a_: List[str]="gelu", a_: Any=0.1, a_: List[Any]=0.1, a_: Any=512, a_: int=16, a_: Dict=2, a_: Dict=0.02, a_: Any=3, a_: Optional[int]=4, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : str = parent
_snake_case : Dict = batch_size
_snake_case : Tuple = seq_length
_snake_case : Any = is_training
_snake_case : Union[str, Any] = use_token_type_ids
_snake_case : Tuple = use_labels
_snake_case : str = vocab_size
_snake_case : Any = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : List[str] = intermediate_size
_snake_case : Union[str, Any] = hidden_act
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : str = type_vocab_size
_snake_case : Tuple = type_sequence_label_size
_snake_case : Dict = initializer_range
_snake_case : Dict = num_labels
_snake_case : int = num_choices
_snake_case : str = scope
_snake_case : Tuple = self.vocab_size - 1
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_snake_case : Any = None
if self.use_token_type_ids:
_snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
_snake_case : Dict = None
_snake_case : Dict = None
_snake_case : int = None
if self.use_labels:
_snake_case : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
_snake_case : Tuple = ids_tensor([self.batch_size], self.num_choices )
_snake_case : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
_snake_case : List[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: str, a_: Optional[int], a_: List[Any], *a_: str ):
'''simple docstring'''
_snake_case : List[str] = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_, token_type_ids=a_, head_mask=a_ )
_snake_case : Union[str, Any] = model(a_, token_type_ids=a_ )
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: List[Any], a_: str, a_: Tuple, a_: List[Any], a_: Tuple, *a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[Any] = model(a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self: str, a_: Any, a_: int, a_: Dict, a_: List[str], *a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self: List[Any], a_: int, a_: str, a_: Optional[Any], a_: Dict, *a_: Optional[int] ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : Union[str, Any] = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : str = model(a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase__ = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self: str, a_: str, a_: Dict, a_: List[str], a_: str, a_: Tuple ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase_ ( self: Any, a_: str, a_: Optional[int], a_: Optional[int]=False ):
'''simple docstring'''
_snake_case : Tuple = super()._prepare_for_class(a_, a_, return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_snake_case : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=a_, )
_snake_case : int = inputs_dict["""labels"""]
_snake_case : str = inputs_dict["""labels"""]
_snake_case : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=a_, )
_snake_case : Optional[int] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=a_ )
return inputs_dict
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[str] = OpenAIGPTModelTester(self )
_snake_case : str = ConfigTester(self, config_class=a_, n_embd=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class lowercase( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(a_ )
_snake_case : Optional[Any] = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=a_ ) # the president is
_snake_case : List[Any] = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_snake_case : str = model.generate(a_, do_sample=a_ )
self.assertListEqual(output_ids[0].tolist(), a_ )
| 719 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
FRAMEWORK = '''pt'''
elif is_tf_available():
FRAMEWORK = '''tf'''
else:
FRAMEWORK = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
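# ByT5 ids are raw UTF-8 bytes shifted by the 3 special tokens (0=pad, 1=eos, 2=unk):
# "U" is byte 85 -> id 88, and the euro sign's three bytes 226/130/172 become 229/133/175.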
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
| 28 | 0 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
A_ = HfArgumentParser(InitializationArguments)
A_ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
A_ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
A_ = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
A_ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
A_ = AutoModelForCausalLM.from_config(config)
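# from_config builds a randomly initialized model; no pretrained weights are loaded here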
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
| 28 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
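# Each dataclass below is parsed with HfArgumentParser, so every field's metadata["help"]
# doubles as the description of the corresponding CLI flag.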
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
lowercase__ = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
lowercase__ = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
lowercase__ = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
lowercase__ = field(default=2 , metadata={"help": "Batch size for training."} )
lowercase__ = field(default=2 , metadata={"help": "Batch size for evaluation."} )
lowercase__ = field(default=0.1 , metadata={"help": "Value of weight decay."} )
lowercase__ = field(
default=1_00_00 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
lowercase__ = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
lowercase__ = field(default="cosine" , metadata={"help": "Learning rate."} )
lowercase__ = field(
default=7_50 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
lowercase__ = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
lowercase__ = field(
default=__a , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
lowercase__ = field(default=5_00_00 , metadata={"help": "Maximum number of training steps."} )
lowercase__ = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
lowercase__ = field(default=10_24 , metadata={"help": "Sequence lengths used for training."} )
lowercase__ = field(default=1 , metadata={"help": "Training seed."} )
lowercase__ = field(
default=10_24 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
lowercase__ = field(
default=__a , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
lowercase__ = field(default=__a , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
lowercase__ = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
lowercase__ = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
lowercase__ = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
lowercase__ = field(default=10_24 , metadata={"help": "Length of sequences to be evaluated."} )
lowercase__ = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
lowercase__ = field(default=__a , metadata={"help": "Number of workers used for code evaluation."} )
lowercase__ = field(
default=__a , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
lowercase__ = field(
default=__a , metadata={"help": "Sample from the language model's output distribution."} )
lowercase__ = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
lowercase__ = field(default=2_56 , metadata={"help": "Maximum number of newly generated tokens."} )
lowercase__ = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
lowercase__ = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
lowercase__ = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
lowercase__ = field(
default=2_00 , metadata={"help": "Number of completions to generate for each sample."} )
lowercase__ = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
lowercase__ = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
lowercase__ = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
lowercase__ = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default=__a , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
lowercase__ = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
lowercase__ = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
lowercase__ = field(
default=10_00_00 , metadata={"help": "Number of files to save per JSON output file."} )
lowercase__ = field(default="content" , metadata={"help": "Column containing text data to process."} )
lowercase__ = field(
default=10_00 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
lowercase__ = field(
default=1_00 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
lowercase__ = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
lowercase__ = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
lowercase__ = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
lowercase__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
lowercase__ = field(
default=__a , metadata={"help": "If True, near-duplicate samples are removed."} )
lowercase__ = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
lowercase__ = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
lowercase__ = field(default="content" , metadata={"help": "Column containing text data to process."} )
lowercase__ = field(default=20_00_00 , metadata={"help": "Number of examples to train tokenizer on."} )
lowercase__ = field(
default=3_27_68 , metadata={"help": "Vocabulary size of the new tokenizer."} )
lowercase__ = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
lowercase__ = field(default=__a , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
lowercase__ = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
lowercase__ = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
lowercase__ = field(default=__a , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
lowercase__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
lowercase__ = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
lowercase__ = field(default=__a , metadata={"help": "Push saved tokenizer to the hub."} )
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : int = vocab_size
_snake_case : int = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Tuple = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : List[Any] = layer_norm_eps
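# rotary_value controls whether rotary position embeddings are also applied to the value
# projections, not only to the queries and keys.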
_snake_case : Optional[Any] = rotary_value
_snake_case : List[str] = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 28 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """xlm"""
lowerCAmelCase_ = {
"""hidden_size""": """emb_dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
"""n_words""": """vocab_size""", # For backward compatibility
}
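# attribute_map lets the generic config names used across the library (hidden_size,
# num_attention_heads, ...) resolve to XLM's historical field names (emb_dim, n_heads, ...).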
def __init__( self , __lowerCAmelCase=3_0_1_4_5 , __lowerCAmelCase=2_0_4_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=1 , __lowerCAmelCase=True , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=2_0_4_8**-0.5 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0.02 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=5 , __lowerCAmelCase=True , __lowerCAmelCase="first" , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=0.1 , __lowerCAmelCase=5 , __lowerCAmelCase=5 , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=0 , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = vocab_size
lowerCamelCase__ = emb_dim
lowerCamelCase__ = n_layers
lowerCamelCase__ = n_heads
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = gelu_activation
lowerCamelCase__ = sinusoidal_embeddings
lowerCamelCase__ = causal
lowerCamelCase__ = asm
lowerCamelCase__ = n_langs
lowerCamelCase__ = use_lang_emb
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = bos_index
lowerCamelCase__ = eos_index
lowerCamelCase__ = pad_index
lowerCamelCase__ = unk_index
lowerCamelCase__ = mask_index
lowerCamelCase__ = is_encoder
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = embed_init_std
lowerCamelCase__ = init_std
lowerCamelCase__ = summary_type
lowerCamelCase__ = summary_use_proj
lowerCamelCase__ = summary_activation
lowerCamelCase__ = summary_proj_to_labels
lowerCamelCase__ = summary_first_dropout
lowerCamelCase__ = start_n_top
lowerCamelCase__ = end_n_top
lowerCamelCase__ = mask_token_id
lowerCamelCase__ = lang_id
if "n_words" in kwargs:
lowerCamelCase__ = kwargs['''n_words''']
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
class __A ( lowerCAmelCase ):
'''simple docstring'''
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 29 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """ClapFeatureExtractor"""
lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if audios is not None:
lowerCamelCase__ = self.feature_extractor(
__lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and audios is not None:
            # restore the upstream behaviour: attach the audio features to the
            # text encoding so both modalities are returned together
            encoding['''input_features'''] = audio_features.input_features
            return encoding
elif text is not None:
return encoding
else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=__lowerCAmelCase )
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.tokenizer.model_input_names
lowerCamelCase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29 | 1 |
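A usage sketch for the processor above (assumptions: the public checkpoint "laion/clap-htsat-unfused" and the torch backend are available; the audio array is random stand-in data):

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000)  # one second of stand-in 48 kHz audio
inputs = processor(
    text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
# with both modalities given, the text encoding also carries "input_features"
print(sorted(inputs.keys()))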
from math import ceil
def lowerCAmelCase__(__snake_case = 1001 ) -> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
lowerCamelCase__ = 2 * i + 1
lowerCamelCase__ = 2 * i
lowerCamelCase__ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_a = int(sys.argv[1])
            print(solution(_a))
except ValueError:
print("Invalid entry - please enter a number")
| 29 |
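The loop above uses the closed form for the corner values of an n x n number spiral. A brute-force cross-check (a sketch; it walks the spiral ring by ring, since only the four corners of each ring lie on the diagonals):

def spiral_diagonal_sum(n: int) -> int:
    total = 1  # the centre cell
    value = 1
    for side in range(3, n + 1, 2):  # ring side lengths 3, 5, 7, ...
        for _ in range(4):           # four corners per ring
            value += side - 1
            total += value
    return total

assert spiral_diagonal_sum(5) == 101  # worked example from the Project Euler 28 statement
print(spiral_diagonal_sum(1001))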
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = projection_dim
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
        ) = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFDPRModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
lowerCamelCase__ = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29 | 1 |
import numpy
# List of input, output pairs
_a = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_a = (((515, 22, 13), 555), ((61, 35, 49), 150))
_a = [2, 4, 1, 5]
_a = len(train_data)
_a = 0.009
def lowerCAmelCase__(__snake_case ,__snake_case="train" ) -> Optional[int]:
'''simple docstring'''
return calculate_hypothesis_value(__snake_case ,__snake_case ) - output(
__snake_case ,__snake_case )
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = 0
for i in range(len(__snake_case ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[Any]:
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCAmelCase__(__snake_case ,__snake_case=m ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = 0
for i in range(__snake_case ):
if index == -1:
summation_value += _error(__snake_case )
else:
summation_value += _error(__snake_case ) * train_data[i][0][index]
return summation_value
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = summation_of_cost_derivative(__snake_case ,__snake_case ) / m
return cost_derivative_value
def lowerCAmelCase__() -> List[str]:
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase__ = 0.0_0_0_0_0_2
lowerCamelCase__ = 0
lowerCamelCase__ = 0
while True:
j += 1
lowerCamelCase__ = [0, 0, 0, 0]
for i in range(0 ,len(__snake_case ) ):
lowerCamelCase__ = get_cost_derivative(i - 1 )
lowerCamelCase__ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__snake_case ,__snake_case ,atol=__snake_case ,rtol=__snake_case ,):
break
lowerCamelCase__ = temp_parameter_vector
print(('''Number of iterations:''', j) )
def lowerCAmelCase__() -> Tuple:
'''simple docstring'''
for i in range(len(__snake_case ) ):
print(('''Actual output value:''', output(__snake_case ,'''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(__snake_case ,'''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 29 |
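The same batch gradient descent, vectorized with NumPy (a sketch under the linear hypothesis theta . [1, x]; the learning rate is lowered because the 0.009 above sits close to the stability limit for this data):

import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
X1 = np.hstack([np.ones((len(X), 1)), X])  # prepend the bias column

theta = np.zeros(X1.shape[1])
lr = 0.005
for step in range(200_000):
    grad = X1.T @ (X1 @ theta - y) / len(y)  # batch gradient of the mean squared error
    theta_next = theta - lr * grad
    if np.allclose(theta, theta_next, atol=2e-6, rtol=0):
        break
    theta = theta_next
print("converged after", step, "steps:", theta)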
import string
from math import logaa
def lowerCAmelCase__(__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = document.translate(
str.maketrans('''''' ,'''''' ,string.punctuation ) ).replace('''\n''' ,'''''' )
lowerCamelCase__ = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> tuple[int, int]:
'''simple docstring'''
lowerCamelCase__ = corpus.lower().translate(
str.maketrans('''''' ,'''''' ,string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCamelCase__ = corpus_without_punctuation.split('''\n''' )
lowerCamelCase__ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__snake_case ))
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ) -> float:
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) ,3 )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> float:
'''simple docstring'''
return round(tf * idf ,3 )
| 29 | 1 |
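A self-contained worked example of the same tf-idf pipeline (the definitions above all share one obfuscated name, so they are restated here with readable names):

import string
from math import log10

def term_frequency(term: str, document: str) -> int:
    words = document.translate(str.maketrans("", "", string.punctuation)).replace("\n", "").split(" ")
    return len([w for w in words if w.lower() == term.lower()])

def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    docs = corpus.lower().translate(str.maketrans("", "", string.punctuation)).split("\n")
    return len([d for d in docs if term.lower() in d]), len(docs)

df, n = document_frequency("cat", "the cat sat\nthe dog sat\nthe cat ran")
idf = round(log10(n / df), 3)                    # log10(3 / 2) ~= 0.176
tf = term_frequency("cat", "the cat sat on the cat mat")
print(tf, df, n, idf, round(tf * idf, 3))        # 2 2 3 0.176 0.352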
def lowerCAmelCase__(__snake_case = 50000000 ) -> int:
'''simple docstring'''
lowerCamelCase__ = set()
lowerCamelCase__ = int((limit - 24) ** (1 / 2) )
lowerCamelCase__ = set(range(3 ,prime_square_limit + 1 ,2 ) )
primes.add(2 )
for p in range(3 ,prime_square_limit + 1 ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,prime_square_limit + 1 ,__snake_case ) ) )
for primea in primes:
lowerCamelCase__ = primea * primea
for primea in primes:
lowerCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowerCamelCase__ = primea * primea * primea * primea
lowerCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(__snake_case )
return len(__snake_case )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29 |
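A small-limit sanity check for the prime-power-triples count above (a sketch; the Project Euler 87 statement lists exactly four expressible numbers below fifty: 28, 33, 47 and 49):

def primes_below(n: int) -> list[int]:
    sieve = bytearray([1]) * n
    sieve[0:2] = b"\x00\x00"
    for p in range(2, int(n**0.5) + 1):
        if sieve[p]:
            sieve[p * p :: p] = bytearray(len(range(p * p, n, p)))
    return [i for i in range(n) if sieve[i]]

def count_triples(limit: int) -> int:
    ps = primes_below(int(limit**0.5) + 1)
    hits = set()
    for a in ps:
        for b in ps:
            if a * a + b**3 >= limit:
                break
            for c in ps:
                total = a * a + b**3 + c**4
                if total >= limit:
                    break
                hits.add(total)
    return len(hits)

assert count_triples(50) == 4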
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 1 |
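The import table above is consumed by a lazy module so that heavy backends load only on first attribute access. A minimal stand-in for that mechanism (an illustrative sketch, not transformers' actual _LazyModule, which additionally handles __dir__, module specs and error reporting):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        module_name = self._symbol_to_module[symbol]
        submodule = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(submodule, symbol)  # imported only when first requested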
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
_a = 4
_a = 3
class __A ( lowerCAmelCase ):
'''simple docstring'''
pass
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
for shard in shards:
for i in range(__snake_case ):
yield {"i": i, "shard": shard}
def lowerCAmelCase__() -> List[str]:
'''simple docstring'''
lowerCamelCase__ = int(os.environ['''RANK'''] )
lowerCamelCase__ = int(os.environ['''WORLD_SIZE'''] )
lowerCamelCase__ = ArgumentParser()
parser.add_argument('''--streaming''' ,type=__snake_case )
parser.add_argument('''--local_rank''' ,type=__snake_case )
parser.add_argument('''--num_workers''' ,type=__snake_case ,default=0 )
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = args.streaming
lowerCamelCase__ = args.num_workers
lowerCamelCase__ = {'''shards''': [F'shard_{shard_idx}' for shard_idx in range(__snake_case )]}
lowerCamelCase__ = IterableDataset.from_generator(__snake_case ,gen_kwargs=__snake_case )
if not streaming:
lowerCamelCase__ = Dataset.from_list(list(__snake_case ) )
lowerCamelCase__ = split_dataset_by_node(__snake_case ,rank=__snake_case ,world_size=__snake_case )
lowerCamelCase__ = torch.utils.data.DataLoader(__snake_case ,num_workers=__snake_case )
lowerCamelCase__ = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowerCamelCase__ = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowerCamelCase__ = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 29 |
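The size bookkeeping in the test above, in one place (a runnable sketch): split_dataset_by_node gives the first full_size % world_size ranks one extra example each.

NUM_SHARDS, NUM_ITEMS_PER_SHARD, WORLD_SIZE = 4, 3, 3
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD  # 12 examples in total
for rank in range(WORLD_SIZE):
    expected = full_size // WORLD_SIZE + int(rank < full_size % WORLD_SIZE)
    print(rank, expected)  # 12 % 3 == 0 here, so every rank gets 4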
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_a = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ = _TestCommandArgs(dataset=__snake_case ,all_configs=__snake_case ,save_infos=__snake_case )
lowerCamelCase__ = TestCommand(*__snake_case )
test_command.run()
lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
assert os.path.exists(__snake_case )
lowerCamelCase__ = DatasetInfosDict.from_directory(__snake_case )
lowerCamelCase__ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2351563,
'''num_examples''': 10000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238418,
'''num_examples''': 1000,
},
] ,download_size=3940680 ,dataset_size=2589981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase__ , lowerCamelCase__ = getattr(dataset_infos['''default'''] ,__snake_case ), getattr(expected_dataset_infos['''default'''] ,__snake_case )
if key == "num_bytes":
assert is_apercent_close(__snake_case ,__snake_case )
elif key == "splits":
assert list(__snake_case ) == list(__snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
            assert result == expected
| 29 | 1 |
from manim import *
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowerCamelCase__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowerCamelCase__ = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowerCamelCase__ = Text('''CPU''' , font_size=2_4 )
lowerCamelCase__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(4 )]
lowerCamelCase__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowerCamelCase__ = Text('''GPU''' , font_size=2_4 )
lowerCamelCase__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowerCamelCase__ = Text('''Model''' , font_size=2_4 )
lowerCamelCase__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
lowerCamelCase__ = []
for i, rect in enumerate(__lowerCAmelCase ):
rect.set_stroke(__lowerCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCamelCase__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCAmelCase , buff=0.0 )
self.add(__lowerCAmelCase )
cpu_targs.append(__lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowerCamelCase__ = Text('''Loaded Checkpoint''' , font_size=2_4 )
lowerCamelCase__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , aligned_edge=__lowerCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowerCamelCase__ = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase ) , Write(__lowerCAmelCase ) )
self.play(Write(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) )
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(__lowerCAmelCase ):
lowerCamelCase__ = fill.copy().set_fill(__lowerCAmelCase , opacity=0.7 )
target.move_to(__lowerCAmelCase )
first_animations.append(GrowFromCenter(__lowerCAmelCase , run_time=1 ) )
lowerCamelCase__ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(*__lowerCAmelCase )
self.wait()
| 29 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = 1_3
lowerCamelCase__ = 7
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 9_9
lowerCamelCase__ = 3_2
lowerCamelCase__ = 2
lowerCamelCase__ = 4
lowerCamelCase__ = 3_7
lowerCamelCase__ = '''gelu'''
lowerCamelCase__ = 0.1
lowerCamelCase__ = 0.1
lowerCamelCase__ = 5_1_2
lowerCamelCase__ = 1_6
lowerCamelCase__ = 2
lowerCamelCase__ = 0.02
lowerCamelCase__ = 3
lowerCamelCase__ = 4
lowerCamelCase__ = None
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
        (
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
        ) = self.prepare_config_and_inputs()
lowerCamelCase__ = True
lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = True
lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
lowerCamelCase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase )
# Also check the case where encoder outputs are not passed
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase )
lowerCamelCase__ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase )
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
        ) = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFEsmModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(__lowerCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase__ = model.get_bias()
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for k, v in name.items():
assert isinstance(__lowerCAmelCase , tf.Variable )
else:
lowerCamelCase__ = model.get_output_embeddings()
assert x is None
lowerCamelCase__ = model.get_bias()
assert name is None
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ = model(__lowerCAmelCase )[0]
lowerCamelCase__ = [1, 6, 3_3]
self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase )
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
lowerCamelCase__ = model(__lowerCAmelCase )[0]
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29 | 1 |
from PIL import Image
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Image:
'''simple docstring'''
def brightness(__snake_case ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__snake_case )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
_a = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 29 |
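Note that brightness(c) above reduces to c + level, which can leave the 0-255 range of an 8-bit image. A variant sketch that clamps explicitly rather than relying on Pillow's handling of out-of-range lookup values:

from PIL import Image

def change_brightness_clamped(img: Image.Image, level: float) -> Image.Image:
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(lambda c: max(0, min(255, c + level)))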
from math import sqrt
def lowerCAmelCase__(__snake_case ) -> bool:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' must been an int and positive"
lowerCamelCase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowerCamelCase__ = False
for divisor in range(2 ,int(round(sqrt(__snake_case ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCamelCase__ = False
break
# precondition
assert isinstance(__snake_case ,__snake_case ), "'status' must been from type bool"
return status
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCamelCase__ = list(range(2 ,n + 1 ) )
lowerCamelCase__ = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 ,len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCamelCase__ = 0
# filters actual prime numbers.
lowerCamelCase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2"
lowerCamelCase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 ,n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and number >= 0, "'number' must been an int and >= 0"
lowerCamelCase__ = [] # this list will be returns of the function.
# potential prime number factors.
lowerCamelCase__ = 2
lowerCamelCase__ = number
if number == 0 or number == 1:
ans.append(__snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCamelCase__ = 0
# prime factorization of 'number'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = max(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type int"
return ans
def lowerCAmelCase__(__snake_case ) -> Dict:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCamelCase__ = 0
# prime factorization of 'number'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = min(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type int"
return ans
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0 ,__snake_case ), "compare bust been from type bool"
return number % 2 == 0
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ), "'number' must been an int"
assert isinstance(number % 2 != 0 ,__snake_case ), "compare bust been from type bool"
return number % 2 != 0
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
lowerCamelCase__ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCamelCase__ = get_prime_numbers(__snake_case )
lowerCamelCase__ = len(__snake_case )
# run variable for while-loops.
lowerCamelCase__ = 0
lowerCamelCase__ = None
# exit variable. for break up the loops
lowerCamelCase__ = True
while i < len_pn and loop:
lowerCamelCase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCamelCase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCAmelCase__(__snake_case ,__snake_case ) -> str:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCamelCase__ = 0
while numbera != 0:
lowerCamelCase__ = numbera % numbera
lowerCamelCase__ = numbera
lowerCamelCase__ = rest
# precondition
assert isinstance(__snake_case ,__snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Any:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCamelCase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = prime_factorization(__snake_case )
elif numbera == 1 or numbera == 1:
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = max(__snake_case ,__snake_case )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(max(__snake_case ,__snake_case ) ):
ans *= n
else:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'number' must been a positive int"
lowerCamelCase__ = 0
lowerCamelCase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case ,__snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCamelCase__ = p_number_a + 1 # jump to the next number
lowerCamelCase__ = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 1), "'n' must been int and >= 1"
lowerCamelCase__ = [] # will be returned.
for divisor in range(1 ,n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCamelCase__ = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCamelCase__ = gcd(abs(__snake_case ) ,abs(__snake_case ) )
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase__(__snake_case ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been a int and >= 0"
lowerCamelCase__ = 1 # this will be return.
for factor in range(1 ,n + 1 ):
ans *= factor
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been an int and >= 0"
lowerCamelCase__ = 0
lowerCamelCase__ = 1
lowerCamelCase__ = 1 # this will be return
for _ in range(n - 1 ):
lowerCamelCase__ = ans
ans += fiba
lowerCamelCase__ = tmp
return ans
| 29 | 1 |
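Worked examples for the routines above (a sketch; the obfuscated definitions all share one name, so the underlying identities are restated with the standard library):

from math import gcd

# goldbach: every even number > 2 splits into two primes, e.g. 28 = 5 + 23
assert 5 + 23 == 28

# kg_v is the least common multiple; lcm(a, b) * gcd(a, b) == a * b
a, b = 24, 36
lcm = a * b // gcd(a, b)
assert lcm == 72 and lcm * gcd(a, b) == a * b

# simplify_fraction divides both parts by the gcd: 24/36 -> 2/3
assert (a // gcd(a, b), b // gcd(a, b)) == (2, 3)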
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 29 |
from __future__ import annotations
def lowerCAmelCase__(__snake_case ,__snake_case = None ,__snake_case = None ) -> None:
'''simple docstring'''
if start is None:
lowerCamelCase__ = 0
if end is None:
lowerCamelCase__ = len(__snake_case ) - 1
if start >= end:
return
lowerCamelCase__ = (start + end) // 2
slowsort(__snake_case ,__snake_case ,__snake_case )
slowsort(__snake_case ,mid + 1 ,__snake_case )
if sequence[end] < sequence[mid]:
lowerCamelCase__ , lowerCamelCase__ = sequence[mid], sequence[end]
slowsort(__snake_case ,__snake_case ,end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 | 1 |
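A usage sketch for the sort above (the recursive calls already reference the name slowsort, so that binding is assumed):

data = [5, 1, 4, 2, 8]
slowsort(data)  # sorts in place; multiply-and-surrender, deliberately super-polynomial
assert data == [1, 2, 4, 5, 8]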
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """blip_2_vision_model"""
def __init__( self , __lowerCAmelCase=1_4_0_8 , __lowerCAmelCase=6_1_4_4 , __lowerCAmelCase=3_9 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2_2_4 , __lowerCAmelCase=1_4 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0_0001 , __lowerCAmelCase=0.0 , __lowerCAmelCase=1E-10 , __lowerCAmelCase=True , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = patch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = hidden_act
lowerCamelCase__ = qkv_bias
@classmethod
def __lowerCamelCase ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
cls._set_token_in_kwargs(__lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
lowerCamelCase__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """blip_2_qformer"""
def __init__( self , __lowerCAmelCase=3_0_5_2_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase="absolute" , __lowerCAmelCase=2 , __lowerCAmelCase=1_4_0_8 , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = cross_attention_frequency
lowerCamelCase__ = encoder_hidden_size
@classmethod
def __lowerCamelCase ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
cls._set_token_in_kwargs(__lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
lowerCamelCase__ = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """blip-2"""
lowerCAmelCase_ = True
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=3_2 , **__lowerCAmelCase ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if vision_config is None:
lowerCamelCase__ = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
lowerCamelCase__ = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
lowerCamelCase__ = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
lowerCamelCase__ = BlipaVisionConfig(**__lowerCAmelCase )
lowerCamelCase__ = BlipaQFormerConfig(**__lowerCAmelCase )
lowerCamelCase__ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
lowerCamelCase__ = CONFIG_MAPPING[text_model_type](**__lowerCAmelCase )
lowerCamelCase__ = self.text_config.tie_word_embeddings
lowerCamelCase__ = self.text_config.is_encoder_decoder
lowerCamelCase__ = num_query_tokens
lowerCamelCase__ = self.vision_config.hidden_size
lowerCamelCase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase__ = 1.0
lowerCamelCase__ = 0.02
@classmethod
def __lowerCamelCase ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase , ):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__lowerCAmelCase , )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.vision_config.to_dict()
lowerCamelCase__ = self.qformer_config.to_dict()
lowerCamelCase__ = self.text_config.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
| 29 |
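A composition sketch for the three sub-configs above (assumption: a transformers release with BLIP-2 support is installed; all defaults are used):

from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
    num_query_tokens=32,
)
# the Q-Former's encoder_hidden_size is tied to the vision hidden size (1408 by default)
print(config.qformer_config.encoder_hidden_size, config.num_query_tokens)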
from __future__ import annotations
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> float:
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float:
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float:
'''simple docstring'''
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
__snake_case ,nominal_annual_percentage_rate / 365 ,number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
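Worked numbers for the three helpers above (a sketch; the shared obfuscated name means the formulas are restated inline):

principal = 10_000.0
# simple interest: 10_000 * 0.0005 per day * 30 days = 150.0
print(principal * 0.0005 * 30)
# compound interest over 12 periods at 1% per period: 10_000 * ((1 + 0.01)**12 - 1) ~= 1268.25
print(principal * ((1 + 0.01) ** 12 - 1))
# the APR helper compounds rate / 365 over years * 365 periods
print(principal * ((1 + 0.05 / 365) ** 365 - 1))  # ~= 512.67 for one year at 5% APR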
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_a = re.compile(r"\s+")
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(__snake_case ,'''''' ,example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [len(__snake_case ) for line in example['''content'''].splitlines()]
return {"line_mean": np.mean(__snake_case ), "line_max": max(__snake_case )}
def lowerCAmelCase__(__snake_case ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = np.mean([c.isalnum() for c in example['''content''']] )
return {"alpha_frac": alpha_frac}
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def lowerCAmelCase__(__snake_case ,__snake_case=5 ) -> int:
'''simple docstring'''
lowerCamelCase__ = ['''auto-generated''', '''autogenerated''', '''automatically generated''']
lowerCamelCase__ = example['''content'''].splitlines()
for _, line in zip(range(__snake_case ) ,__snake_case ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowerCAmelCase__(__snake_case ,__snake_case=5 ,__snake_case=0.0_5 ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = ['''unit tests''', '''test file''', '''configuration file''']
lowerCamelCase__ = example['''content'''].splitlines()
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# first test
for _, line in zip(range(__snake_case ) ,__snake_case ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
lowerCamelCase__ = example['''content'''].count('''\n''' )
lowerCamelCase__ = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ = ['''def ''', '''class ''', '''for ''', '''while ''']
lowerCamelCase__ = example['''content'''].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowerCAmelCase__(__snake_case ,__snake_case=4 ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = example['''content'''].splitlines()
lowerCamelCase__ = 0
for line in lines:
counter += line.lower().count('''=''' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ = tokenizer(example['''content'''] ,truncation=__snake_case )['''input_ids''']
lowerCamelCase__ = len(example['''content'''] ) / len(__snake_case )
return {"ratio": ratio}
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = {}
results.update(get_hash(__snake_case ) )
results.update(line_stats(__snake_case ) )
results.update(alpha_stats(__snake_case ) )
results.update(char_token_ratio(__snake_case ) )
results.update(is_autogenerated(__snake_case ) )
results.update(is_config_or_test(__snake_case ) )
results.update(has_no_keywords(__snake_case ) )
results.update(has_few_assignments(__snake_case ) )
return results
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> List[Any]:
'''simple docstring'''
if not check_uniques(__snake_case ,__snake_case ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
with open(__snake_case ,'''rb''' ) as f_in:
with gzip.open(str(__snake_case ) + '''.gz''' ,'''wb''' ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__snake_case ,__snake_case )
os.unlink(__snake_case )
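# Clarifying sketch (added): the exact-dedup filter above works by removing
# each content hash from the `uniques` set the first time it is seen, so only
# the first example per hash survives `ds.filter`. A minimal standalone
# version of the same idea:
def _dedup_demo() -> None:
    uniques = {"h1", "h2"}
    examples = [{"hash": "h1"}, {"hash": "h2"}, {"hash": "h1"}]
    kept = []
    for ex in examples:
        if ex["hash"] in uniques:  # first occurrence of this hash
            uniques.remove(ex["hash"])  # later duplicates are dropped
            kept.append(ex)
    assert kept == [{"hash": "h1"}, {"hash": "h2"}]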
# Settings
_a = HfArgumentParser(PreprocessingArguments)
_a = parser.parse_args()
if args.num_workers is None:
_a = multiprocessing.cpu_count()
_a = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_a = time.time()
_a = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
_a = time.time()
_a = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
_a = set(ds.unique("hash"))
_a = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
_a = time.time()
_a = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_a = time.time()
_a , _a = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
_a = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
_a = output_dir / "data"
data_dir.mkdir(exist_ok=True)
_a = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_a = str(data_dir / f"""file-{file_number+1:012}.json""")
_a = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 29 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
def wrapper(*__snake_case ,**__snake_case ):
lowerCamelCase__ = timeit.default_timer()
lowerCamelCase__ = func(*__snake_case ,**__snake_case )
lowerCamelCase__ = timeit.default_timer() - starttime
return delta
lowerCamelCase__ = func.__name__
return wrapper
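# Usage sketch (added for illustration): the decorator above swaps a
# function's return value for its wall-clock duration measured with
# timeit.default_timer. A self-contained equivalent under the hypothetical
# name `_get_duration`:
def _get_duration(func):
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start
    wrapper.__name__ = func.__name__
    return wrapper
@_get_duration
def _busy_work() -> None:
    sum(range(100_000))
# `_busy_work()` now returns a float number of seconds instead of None.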
def lowerCAmelCase__(__snake_case ,__snake_case=100 ,__snake_case=None ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = seq_shapes or {}
for i in range(__snake_case ):
lowerCamelCase__ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__snake_case ,_ArrayXD ):
lowerCamelCase__ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__snake_case ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase__ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase__ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(__snake_case ,datasets.Sequence ):
while isinstance(__snake_case ,datasets.Sequence ):
lowerCamelCase__ = v.feature
lowerCamelCase__ = seq_shapes[k]
lowerCamelCase__ = np.random.rand(*__snake_case ).astype(v.dtype )
lowerCamelCase__ = data
dummy_data.append((i, example) )
return dummy_data
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=100 ,__snake_case=None ) -> str:
'''simple docstring'''
lowerCamelCase__ = generate_examples(__snake_case ,num_examples=__snake_case ,seq_shapes=__snake_case )
with ArrowWriter(features=__snake_case ,path=__snake_case ) as writer:
for key, record in dummy_data:
lowerCamelCase__ = features.encode_example(__snake_case )
writer.write(__snake_case )
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
lowerCamelCase__ = datasets.Dataset.from_file(filename=__snake_case ,info=datasets.DatasetInfo(features=__snake_case ) )
return dataset
| 29 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = RobertaTokenizer
lowerCAmelCase_ = RobertaTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {"""cls_token""": """<s>"""}
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowerCamelCase__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase__ = {'''unk_token''': '''<unk>'''}
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = '''lower newer'''
lowerCamelCase__ = '''lower newer'''
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase__ = '''lower newer'''
lowerCamelCase__ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase__ = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = tokens + [tokenizer.unk_token]
lowerCamelCase__ = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.tokenizer_class.from_pretrained('''roberta-base''' )
lowerCamelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = '''Encode this sequence.'''
lowerCamelCase__ = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing spaces after special tokens
lowerCamelCase__ = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
lowerCamelCase__ = '''Encode <mask> sequence'''
lowerCamelCase__ = '''Encode <mask>sequence'''
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase )
lowerCamelCase__ = encoded.index(__lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase )
lowerCamelCase__ = encoded.index(__lowerCAmelCase )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase__ = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def __lowerCamelCase ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __lowerCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __lowerCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCamelCase__ = F'{text_of_1_token} {text_of_1_token}'
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCamelCase__ = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowerCamelCase__ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
| 29 |
def lowerCAmelCase__(__snake_case ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 ,len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase__ = grid[0]
for row_n in range(1 ,len(__snake_case ) ):
lowerCamelCase__ = grid[row_n]
lowerCamelCase__ = fill_row(__snake_case ,__snake_case )
lowerCamelCase__ = grid[row_n]
return grid[-1][-1]
def lowerCAmelCase__(__snake_case ,__snake_case ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 ,len(__snake_case ) ):
current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
return current_row
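# Worked example (added): for the grid [[1, 3], [2, 4]] the first row is
# prefix-summed to [1, 4]; fill_row then yields 2 + 1 = 3 and
# 4 + min(3, 4) = 7, so the cheapest top-left to bottom-right path costs 7.
def _min_path_sum_demo() -> None:
    row_above = [1, 4]  # first row after the prefix sum
    current_row = [2, 4]
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    assert current_row == [3, 7]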
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
_a = tuple[int, int]
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = vertices
lowerCamelCase__ = {
(min(__lowerCAmelCase ), max(__lowerCAmelCase )): weight for edge, weight in edges.items()
}
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCamelCase__ = weight
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = Graph({min(self.vertices )} , {} )
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCamelCase__ = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCamelCase__ = edge
lowerCamelCase__ = weight
subgraph.add_edge(__lowerCAmelCase , __lowerCAmelCase )
return subgraph
def lowerCAmelCase__(__snake_case = "p107_network.txt" ) -> int:
'''simple docstring'''
lowerCamelCase__ = os.path.abspath(os.path.dirname(__snake_case ) )
lowerCamelCase__ = os.path.join(__snake_case ,__snake_case )
lowerCamelCase__ = {}
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
with open(__snake_case ) as f:
lowerCamelCase__ = f.read().strip().split('''\n''' )
lowerCamelCase__ = [line.split(''',''' ) for line in data]
for edgea in range(1 ,len(__snake_case ) ):
for edgea in range(__snake_case ):
            if adjacency_matrix[edgea][edgea] != "-":
                lowerCamelCase__ = int(adjacency_matrix[edgea][edgea] )
lowerCamelCase__ = Graph(set(range(len(__snake_case ) ) ) ,__snake_case )
lowerCamelCase__ = graph.prims_algorithm()
lowerCamelCase__ = sum(graph.edges.values() )
lowerCamelCase__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
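# Minimal Prim sketch (added, independent of the obfuscated class above): grow
# a tree from one vertex, always taking the cheapest edge that crosses the cut
# between tree and non-tree vertices; the saving is the initial total edge
# weight minus the optimal (spanning-tree) weight.
def _prims_demo() -> None:
    edges = {(0, 1): 1, (0, 2): 4, (1, 2): 2}
    tree_vertices = {0}
    total = 0
    while len(tree_vertices) < 3:
        crossing = {
            e: w
            for e, w in edges.items()
            if (e[0] in tree_vertices) ^ (e[1] in tree_vertices)
        }
        cheapest = min(crossing, key=crossing.get)
        total += crossing[cheapest]
        tree_vertices.update(cheapest)
    assert total == 3  # picks (0, 1) and (1, 2), saving 4 versus the full 7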
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
lowerCAmelCase_ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
lowerCAmelCase_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.task_name.lower()
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """train"""
lowerCAmelCase_ = """dev"""
lowerCAmelCase_ = """test"""
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
'''simple docstring'''
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
lowerCamelCase__ = args
lowerCamelCase__ = glue_processors[args.task_name]()
lowerCamelCase__ = glue_output_modes[args.task_name]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
lowerCamelCase__ = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
lowerCamelCase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCamelCase__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
lowerCamelCase__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ = cached_features_file + '''.lock'''
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase__ = time.time()
lowerCamelCase__ = torch.load(__lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase__ = examples[:limit_length]
lowerCamelCase__ = glue_convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
lowerCamelCase__ = time.time()
torch.save(self.features , __lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , __lowerCAmelCase ):
'''simple docstring'''
return self.features[i]
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.label_list
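# Caching sketch (added for clarity): the dataset above wraps feature creation
# in a FileLock so that, under distributed training, only the first process
# builds the cache while the others block and then load it. The same pattern,
# stripped to its essentials (hypothetical `_cached_build` helper):
def _cached_build(cache_path, build_fn):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        result = build_fn()
        torch.save(result, cache_path)
        return result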
| 29 | 1 |
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_a = [{"type": "code", "content": INSTALL_CONTENT}]
_a = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 29 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_a = datasets.logging.get_logger(__name__)
_a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ,__snake_case=False ,__snake_case=True ,__snake_case=False ,__snake_case="dummy_doc" ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = {doc: key_lines}
lowerCamelCase__ = {doc: sys_lines}
lowerCamelCase__ = {}
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,key_doc_lines[doc] ,__snake_case )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case )
lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,sys_doc_lines[doc] ,__snake_case )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case )
if remove_nested:
lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case )
lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case )
lowerCamelCase__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'''Number of resulting singleton clusters in the key '''
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'''files, respectively''' )
return doc_coref_infos
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ = get_coref_infos(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
lowerCamelCase__ = {}
lowerCamelCase__ = 0
lowerCamelCase__ = 0
for name, metric in metrics:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = evaluator.evaluate_documents(__snake_case ,__snake_case ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,)
if conll_subparts_num == 3:
lowerCamelCase__ = (conll / 3) * 100
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'''conll_score''': conll} )
return output_scores
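# Clarifying sketch (added): the CoNLL-2012 official score computed above is
# the arithmetic mean of the MUC, B-cubed and CEAFe F1 values, scaled to a
# percentage; LEA and mention scores do not enter the average.
def _conll_score_demo() -> None:
    muc_fa, bcub_fa, ceafe_fa = 0.70, 0.60, 0.50
    assert abs((muc_fa + bcub_fa + ceafe_fa) / 3 * 100 - 60.0) < 1e-9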
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase__ = line.split()[5]
if not parse_col == "-":
lowerCamelCase__ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ):
'''simple docstring'''
lowerCamelCase__ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase__ = util.check_gold_parse_annotation(__lowerCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase__ = evaluate(
key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , )
return score
| 29 | 1 |
def lowerCAmelCase__(__snake_case = 1000000 ) -> int:
'''simple docstring'''
lowerCamelCase__ = set(range(3 ,__snake_case ,2 ) )
primes.add(2 )
for p in range(3 ,__snake_case ,2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p ,__snake_case ,p ) ) )
    lowerCamelCase__ = [float(n) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p ,limit + 1 ,p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
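# Sanity-check sketch (added): the sieve above computes Euler's totient for
# every n <= limit, and summing phi(2..limit) counts the reduced proper
# fractions. For limit = 8 the totients are 1, 2, 2, 4, 2, 6, 4, summing to 21.
def _totient_demo() -> None:
    def gcd(a: int, b: int) -> int:
        while b:
            a, b = b, a % b
        return a
    def phi(n: int) -> int:
        return sum(1 for k in range(1, n) if gcd(n, k) == 1)
    assert sum(phi(n) for n in range(2, 9)) == 21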
if __name__ == "__main__":
print(f"""{solution() = }""")
| 29 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_a = open # noqa: we just need to have a builtin inside this module to test it properly
| 29 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """roberta-prelayernorm"""
def __init__( self , __lowerCAmelCase=5_0_2_6_5 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase="absolute" , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = use_cache
lowerCamelCase__ = classifier_dropout
class __A ( lowerCAmelCase ):
'''simple docstring'''
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 29 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_a = logging.get_logger(__name__)
class __A :
'''simple docstring'''
lowerCAmelCase_ = None
@experimental
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
return _map_with_joblib(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = num_proc if num_proc <= len(__snake_case ) else len(__snake_case )
    lowerCamelCase__ = [] # We organize the splits ourselves (contiguous splits)
for index in range(__snake_case ):
lowerCamelCase__ = len(__snake_case ) // num_proc
lowerCamelCase__ = len(__snake_case ) % num_proc
lowerCamelCase__ = div * index + min(__snake_case ,__snake_case )
lowerCamelCase__ = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__snake_case ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F'Error dividing inputs iterable among processes. '
F'Total number of objects {len(__snake_case )}, '
F'length: {sum(len(i[1] ) for i in split_kwds )}' )
logger.info(
F'Spawning {num_proc} processes for {len(__snake_case )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
lowerCamelCase__ , lowerCamelCase__ = None, None
if not disable_tqdm:
lowerCamelCase__ , lowerCamelCase__ = (RLock(),), tqdm.set_lock
with Pool(__snake_case ,initargs=__snake_case ,initializer=__snake_case ) as pool:
lowerCamelCase__ = pool.map(__snake_case ,__snake_case )
logger.info(F'Finished {num_proc} processes' )
lowerCamelCase__ = [obj for proc_res in mapped for obj in proc_res]
logger.info(F'Unpacked {len(__snake_case )} objects' )
return mapped
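# Clarifying sketch (added): the contiguous-split logic above hands
# len(iterable) items to num_proc workers, giving the first `mod` workers one
# extra item. For 10 items over 3 workers the slices come out [0:4], [4:7],
# [7:10]:
def _contiguous_split_demo() -> None:
    n, num_proc = 10, 3
    div, mod = n // num_proc, n % num_proc
    bounds = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        bounds.append((start, end))
    assert bounds == [(0, 4), (4, 7), (7, 10)]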
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=__snake_case ):
return joblib.Parallel()(
joblib.delayed(__snake_case )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowerCAmelCase__(__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCamelCase__ = None
| 29 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_a = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_a = concatenate_datasets
_a = DownloadConfig
_a = DownloadManager
_a = DownloadMode
_a = DownloadConfig
_a = DownloadMode
_a = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 29 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 42
class __A ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 8_8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = "geglu" , __lowerCAmelCase = True , __lowerCAmelCase = True , ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = attention_head_dim
lowerCamelCase__ = num_attention_heads * attention_head_dim
lowerCamelCase__ = in_channels
lowerCamelCase__ = torch.nn.GroupNorm(num_groups=__lowerCAmelCase , num_channels=__lowerCAmelCase , eps=1E-6 , affine=__lowerCAmelCase )
lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
# 3. Define transformers blocks
lowerCamelCase__ = nn.ModuleList(
[
BasicTransformerBlock(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dropout=__lowerCAmelCase , cross_attention_dim=__lowerCAmelCase , activation_fn=__lowerCAmelCase , attention_bias=__lowerCAmelCase , double_self_attention=__lowerCAmelCase , norm_elementwise_affine=__lowerCAmelCase , )
for d in range(__lowerCAmelCase )
] )
lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase = True , ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = hidden_states.shape
lowerCamelCase__ = batch_frames // num_frames
lowerCamelCase__ = hidden_states
lowerCamelCase__ = hidden_states[None, :].reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowerCamelCase__ = self.norm(__lowerCAmelCase )
lowerCamelCase__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = self.proj_in(__lowerCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowerCamelCase__ = block(
__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , timestep=__lowerCAmelCase , cross_attention_kwargs=__lowerCAmelCase , class_labels=__lowerCAmelCase , )
# 3. Output
lowerCamelCase__ = self.proj_out(__lowerCAmelCase )
lowerCamelCase__ = (
hidden_states[None, None, :]
.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowerCamelCase__ = hidden_states.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=__lowerCAmelCase )
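# Shape sketch (added, not part of the model): the forward pass above folds
# the spatial grid into the batch so self-attention runs over the frame axis,
# i.e. (batch * frames, C, H, W) -> (batch * H * W, frames, C) and back.
def _temporal_reshape_demo() -> None:
    batch, frames, channels, height, width = 2, 4, 8, 16, 16
    x = torch.randn(batch * frames, channels, height, width)
    x = x[None, :].reshape(batch, frames, channels, height, width)
    x = x.permute(0, 2, 1, 3, 4)  # (batch, C, frames, H, W)
    x = x.permute(0, 3, 4, 2, 1).reshape(batch * height * width, frames, channels)
    assert tuple(x.shape) == (batch * height * width, frames, channels)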
| 29 | 1 |
def lowerCAmelCase__(__snake_case ) -> str:
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_a = [{"type": "code", "content": INSTALL_CONTENT}]
_a = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 29 | 1 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_a = logging.getLogger(__name__)
class __A :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
lowerCamelCase__ = False
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
if not self.initialized:
lowerCamelCase__ = RagRetriever(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
lowerCamelCase__ = True
def __lowerCamelCase ( self ):
'''simple docstring'''
self.retriever.index.init_index()
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = self.retriever._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return doc_ids, retrieved_doc_embeds
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
'''simple docstring'''
if index is not None and index.is_initialized() and len(__lowerCAmelCase ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
lowerCamelCase__ = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for worker in self.retrieval_workers
] )
def __lowerCamelCase ( self ):
'''simple docstring'''
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
lowerCamelCase__ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
lowerCamelCase__ , lowerCamelCase__ = ray.get(random_worker.retrieve.remote(__lowerCAmelCase , __lowerCAmelCase ) )
else:
lowerCamelCase__ , lowerCamelCase__ = self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase )
@classmethod
def __lowerCamelCase ( cls , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
'''simple docstring'''
return super(__lowerCAmelCase , cls ).get_tokenizers(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def __lowerCamelCase ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = kwargs.pop('''config''' , __lowerCAmelCase ) or RagConfig.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = RagTokenizer.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
lowerCamelCase__ = rag_tokenizer.question_encoder
lowerCamelCase__ = rag_tokenizer.generator
if indexed_dataset is not None:
lowerCamelCase__ = '''custom'''
lowerCamelCase__ = CustomHFIndex(config.retrieval_vector_size , __lowerCAmelCase )
else:
lowerCamelCase__ = cls._build_index(__lowerCAmelCase )
return cls(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , retrieval_workers=__lowerCAmelCase , index=__lowerCAmelCase , )
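# A hypothetical launch sketch (added for illustration, not part of the original
# file). It assumes Ray is installed, that the first class above is the retrieval
# worker actor and the second one the distributed retriever, and that a RAG
# checkpoint such as "facebook/rag-token-nq" is reachable; the class and variable
# names below are assumptions.
#
# ray.init()
# workers = [ray.remote(RayRetrieverWorker).remote() for _ in range(4)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", workers
# )
# retriever.init_retrieval()  # each worker loads the index exactly once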
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_a = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = ["""input_features""", """attention_mask"""]
def __init__( self , __lowerCAmelCase=8_0 , __lowerCAmelCase=1_6_0_0_0 , __lowerCAmelCase=8_0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = num_mel_bins
lowerCamelCase__ = do_ceptral_normalize
lowerCamelCase__ = normalize_means
lowerCamelCase__ = normalize_vars
lowerCamelCase__ = True
def __lowerCamelCase ( self , __lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 )
lowerCamelCase__ = ta_kaldi.fbank(__lowerCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 0.0 , ):
'''simple docstring'''
if normalize_means:
lowerCamelCase__ = x[:input_length].mean(axis=0 )
lowerCamelCase__ = np.subtract(__lowerCAmelCase , __lowerCAmelCase )
if normalize_vars:
lowerCamelCase__ = x[:input_length].std(axis=0 )
lowerCamelCase__ = np.divide(__lowerCAmelCase , __lowerCAmelCase )
if input_length < x.shape[0]:
lowerCamelCase__ = padding_value
# make sure array is in float32
lowerCamelCase__ = x.astype(np.floataa )
return x
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
lowerCamelCase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__lowerCAmelCase , __lowerCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__lowerCAmelCase , __lowerCAmelCase )
]
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowerCamelCase__ = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase__ = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
lowerCamelCase__ = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ = [raw_speech]
# extract fbank features
lowerCamelCase__ = [self._extract_fbank_features(__lowerCAmelCase ) for waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase__ = BatchFeature({'''input_features''': features} )
lowerCamelCase__ = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
# make sure list is in array format
lowerCamelCase__ = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __lowerCAmelCase ):
lowerCamelCase__ = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
lowerCamelCase__ = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
lowerCamelCase__ = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowerCamelCase__ = (
np.array(__lowerCAmelCase , dtype=np.intaa )
if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCamelCase__ = self.normalize(
padded_inputs['''input_features'''] , attention_mask=__lowerCAmelCase )
if return_tensors is not None:
lowerCamelCase__ = padded_inputs.convert_to_tensors(__lowerCAmelCase )
return padded_inputs
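# A minimal usage sketch (illustrative only; `Speech2TextFeatureExtractor` is
# assumed to be the public name of the class above, and torchaudio must be
# installed for the Kaldi fbank computation):
#
# import numpy as np
# extractor = Speech2TextFeatureExtractor()               # defaults: 80 mel bins, 16 kHz
# waveform = np.random.randn(16_000).astype(np.float32)   # one second of audio
# batch = extractor(waveform, sampling_rate=16_000, return_tensors="np")
# batch["input_features"].shape                           # (1, num_frames, 80)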
| 29 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 29 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> float | int:
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCamelCase__ = cst_fwd.get(__snake_case ,np.inf )
lowerCamelCase__ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
lowerCamelCase__ = new_cost_f
lowerCamelCase__ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = -1
lowerCamelCase__ = set()
lowerCamelCase__ = set()
lowerCamelCase__ = {source: 0}
lowerCamelCase__ = {destination: 0}
lowerCamelCase__ = {source: None}
lowerCamelCase__ = {destination: None}
lowerCamelCase__ = PriorityQueue()
lowerCamelCase__ = PriorityQueue()
lowerCamelCase__ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
lowerCamelCase__ , lowerCamelCase__ = queue_forward.get()
visited_forward.add(__snake_case )
lowerCamelCase__ , lowerCamelCase__ = queue_backward.get()
visited_backward.add(__snake_case )
lowerCamelCase__ = pass_and_relaxation(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,)
lowerCamelCase__ = pass_and_relaxation(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,)
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
lowerCamelCase__ = shortest_distance
return shortest_path_distance
_a = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
_a = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
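# Sanity check for the adjacency lists above (the two `_a` assignments are the
# forward and the backward graph; note that the second one rebinds `_a`). With
# the original, pre-obfuscation names, the bidirectional search should return
# 3 for source "E" and destination "F", via the path E -> G -> F (cost 2 + 1):
#
#   bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3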
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCamelCase__ = {
'''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
'''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
'''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
}
lowerCamelCase__ = F'{src_lang}-{tgt_lang}'
lowerCamelCase__ = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=__snake_case ,exist_ok=__snake_case )
lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
print(F'Generating {path}' )
with open(__snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(__snake_case )
# make sure we are under the root of the project
_a = Path(__file__).resolve().parent.parent.parent
_a = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_a = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 29 | 1 |
import torch
from transformers import AutoModel
class __A ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(__lowerCAmelCase , self ).__init__()
lowerCamelCase__ = AutoModel.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
lowerCamelCase__ = torch.nn.CosineSimilarity(3 , 1E-08 )
lowerCamelCase__ = torch.nn.Softmax(dim=1 )
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
return self.bert(**__lowerCAmelCase ).last_hidden_state
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(__lowerCAmelCase , __lowerCAmelCase ) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = W_supports['''sizes'''].tolist()
lowerCamelCase__ = W_supports['''start_token_id'''].item()
lowerCamelCase__ = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowerCamelCase__ = self.BERT(**__lowerCAmelCase )
lowerCamelCase__ = self.BERT(**__lowerCAmelCase )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = W_supports['''input_ids'''] == start_token_id
lowerCamelCase__ = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__lowerCAmelCase ):
if i == 0:
lowerCamelCase__ = 0
else:
lowerCamelCase__ = support_sizes[i - 1]
lowerCamelCase__ = S[s : s + size][start_token_masks[s : s + size]]
lowerCamelCase__ = S[s : s + size][end_token_masks[s : s + size]]
lowerCamelCase__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowerCamelCase__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowerCamelCase__ = torch.vstack((p_starts, p_start) )
lowerCamelCase__ = torch.vstack((p_ends, p_end) )
else:
lowerCamelCase__ = p_start
lowerCamelCase__ = p_end
return p_starts, p_ends
| 29 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 29 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
if "model" in orig_key:
lowerCamelCase__ = orig_key.replace('''model.''' ,'''''' )
if "norm1" in orig_key:
lowerCamelCase__ = orig_key.replace('''norm1''' ,'''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowerCamelCase__ = orig_key.replace('''norm2''' ,'''output.LayerNorm''' )
if "norm" in orig_key:
lowerCamelCase__ = orig_key.replace('''norm''' ,'''LayerNorm''' )
if "transformer" in orig_key:
lowerCamelCase__ = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowerCamelCase__ = orig_key.replace(F'transformer_{layer_num}' ,F'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
lowerCamelCase__ = orig_key.replace('''mha.attn''' ,'''attention.self''' )
if "mha" in orig_key:
lowerCamelCase__ = orig_key.replace('''mha''' ,'''attention''' )
if "W_q" in orig_key:
lowerCamelCase__ = orig_key.replace('''W_q''' ,'''self.query''' )
if "W_k" in orig_key:
lowerCamelCase__ = orig_key.replace('''W_k''' ,'''self.key''' )
if "W_v" in orig_key:
lowerCamelCase__ = orig_key.replace('''W_v''' ,'''self.value''' )
if "ff1" in orig_key:
lowerCamelCase__ = orig_key.replace('''ff1''' ,'''intermediate.dense''' )
if "ff2" in orig_key:
lowerCamelCase__ = orig_key.replace('''ff2''' ,'''output.dense''' )
if "ff" in orig_key:
lowerCamelCase__ = orig_key.replace('''ff''' ,'''output.dense''' )
if "mlm_class" in orig_key:
lowerCamelCase__ = orig_key.replace('''mlm.mlm_class''' ,'''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowerCamelCase__ = orig_key.replace('''mlm''' ,'''cls.predictions.transform''' )
if "cls" not in orig_key:
lowerCamelCase__ = '''yoso.''' + orig_key
return orig_key
def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase__ = orig_state_dict.pop(__snake_case )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowerCamelCase__ = val
lowerCamelCase__ = orig_state_dict['''cls.predictions.decoder.bias''']
lowerCamelCase__ = torch.arange(__snake_case ).expand((1, -1) ) + 2
return orig_state_dict
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = torch.load(__snake_case ,map_location='''cpu''' )['''model_state_dict''']
lowerCamelCase__ = YosoConfig.from_json_file(__snake_case )
lowerCamelCase__ = YosoForMaskedLM(__snake_case )
lowerCamelCase__ = convert_checkpoint_helper(config.max_position_embeddings ,__snake_case )
print(model.load_state_dict(__snake_case ) )
model.eval()
model.save_pretrained(__snake_case )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_a = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
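# Example invocation (illustrative; the script name and paths are placeholders,
# but the flags match the argparse definition above):
#
# python convert_yoso_checkpoint.py \
#     --pytorch_model_path ./yoso_checkpoint.bin \
#     --config_file ./yoso_config.json \
#     --pytorch_dump_path ./yoso-hf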
| 29 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> float | int:
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCamelCase__ = cst_fwd.get(__snake_case ,np.inf )
lowerCamelCase__ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
lowerCamelCase__ = new_cost_f
lowerCamelCase__ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = -1
lowerCamelCase__ = set()
lowerCamelCase__ = set()
lowerCamelCase__ = {source: 0}
lowerCamelCase__ = {destination: 0}
lowerCamelCase__ = {source: None}
lowerCamelCase__ = {destination: None}
lowerCamelCase__ = PriorityQueue()
lowerCamelCase__ = PriorityQueue()
lowerCamelCase__ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
lowerCamelCase__ , lowerCamelCase__ = queue_forward.get()
visited_forward.add(__snake_case )
lowerCamelCase__ , lowerCamelCase__ = queue_backward.get()
visited_backward.add(__snake_case )
lowerCamelCase__ = pass_and_relaxation(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,)
lowerCamelCase__ = pass_and_relaxation(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,)
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
lowerCamelCase__ = shortest_distance
return shortest_path_distance
_a = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
_a = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
from math import log
from scipy.constants import Boltzmann, physical_constants
_a = 300 # TEMPERATURE (unit = K)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
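# Worked example (added for illustration). The function above computes the
# built-in potential V_bi = kT/q * ln(N_D * N_A / n_i**2); for a silicon-like
# junction with N_D = N_A = 1e17 and n_i = 1.41e10 (all in cm^-3):
#
#   kT/q ~= 0.0259 V and ln(1e34 / 1.988e20) ~= 31.5, so V_bi ~= 0.82 V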
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """ClapFeatureExtractor"""
lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if audios is not None:
lowerCamelCase__ = self.feature_extractor(
__lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and audios is not None:
lowerCamelCase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.tokenizer.model_input_names
lowerCamelCase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
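# A hypothetical usage sketch (not part of the original file). It assumes the
# class above is exposed as `ClapProcessor` and that the public
# "laion/clap-htsat-unfused" checkpoint is available:
#
# from transformers import ClapProcessor
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=[waveform],
#                    sampling_rate=48_000, return_tensors="pt")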
| 29 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_a = logging.get_logger(__name__)
# TODO: upload to AWS
_a = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """retribert"""
def __init__( self , __lowerCAmelCase=3_0_5_2_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=True , __lowerCAmelCase=1_2_8 , __lowerCAmelCase=0 , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = share_encoders
lowerCamelCase__ = projection_dim
| 29 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = projection_dim
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFDPRModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
lowerCamelCase__ = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ = BertConfig.from_json_file(__snake_case )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase__ = BertForPreTraining(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__snake_case ,__snake_case ,__snake_case )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() ,__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_a = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
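# Example invocation (paths are placeholders; the flags match the argparse
# definition above):
#
# python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin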
| 29 |
import string
from math import logaa
def lowerCAmelCase__(__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = document.translate(
str.maketrans('''''' ,'''''' ,string.punctuation ) ).replace('''\n''' ,'''''' )
lowerCamelCase__ = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> tuple[int, int]:
'''simple docstring'''
lowerCamelCase__ = corpus.lower().translate(
str.maketrans('''''' ,'''''' ,string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCamelCase__ = corpus_without_punctuation.split('''\n''' )
lowerCamelCase__ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__snake_case ))
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ) -> float:
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) ,3 )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> float:
'''simple docstring'''
return round(tf * idf ,3 )
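# A self-contained sanity check of the arithmetic above (added for
# illustration; all four helpers in this dump share the obfuscated name
# `lowerCAmelCase__`, so the computation is reproduced by hand instead of
# calling them):
from math import log10

tf = "this is the first document".split().count("first")  # term frequency -> 1
idf = round(log10(3 / 1), 3)  # 3 documents, "first" occurs in 1 of them -> 0.477
print(round(tf * idf, 3))  # tf-idf -> 0.477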
| 29 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = CanineTokenizer
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
lowerCamelCase__ = 1_0_2_4
return tokenizer
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.canine_tokenizer
lowerCamelCase__ = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
lowerCamelCase__ = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
lowerCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 3_9) , batch.input_ids.shape )
self.assertEqual((2, 3_9) , batch.attention_mask.shape )
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.canine_tokenizer
        lowerCamelCase__ = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
lowerCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , __lowerCAmelCase )
self.assertIn('''attention_mask''' , __lowerCAmelCase )
self.assertIn('''token_type_ids''' , __lowerCAmelCase )
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.canine_tokenizer
lowerCamelCase__ = [
            '''What\'s the weather?''',
'''It\'s about 25 degrees.''',
]
lowerCamelCase__ = tokenizer(
text_target=__lowerCAmelCase , max_length=3_2 , padding='''max_length''' , truncation=__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCamelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ = tempfile.mkdtemp()
lowerCamelCase__ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
lowerCamelCase__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
lowerCamelCase__ = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
lowerCamelCase__ = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ = tempfile.mkdtemp()
lowerCamelCase__ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase__ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase__ = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
lowerCamelCase__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
lowerCamelCase__ = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertIn(__lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCamelCase__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase__ , lowerCamelCase__ = self.get_clean_sequence(__lowerCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase__ = 0xE_0_0_5
lowerCamelCase__ = chr(__lowerCAmelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
lowerCamelCase__ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , input_encoded + special_token_id )
lowerCamelCase__ = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase__ = chr(0xE_0_0_5 )
lowerCamelCase__ = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
lowerCamelCase__ = tokenizer.tokenize(__lowerCAmelCase )
lowerCamelCase__ = tokenizer.tokenize(__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCAmelCase )
self.assertEqual(token_a[0] , __lowerCAmelCase )
@require_tokenizers
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
lowerCamelCase__ = 0xE_0_0_6
lowerCamelCase__ = chr(__lowerCAmelCase )
lowerCamelCase__ = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCAmelCase )
tokenizer.from_pretrained(__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase__ = json.load(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase__ = json.load(__lowerCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase__ = 0xE_0_0_6
lowerCamelCase__ = chr(__lowerCAmelCase )
lowerCamelCase__ = [new_token_a]
lowerCamelCase__ = [new_token_a]
with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ = tokenizer_class.from_pretrained(__lowerCAmelCase , extra_ids=0 )
self.assertIn(__lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase__ = 0xE_0_0_7
lowerCamelCase__ = chr(__lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ = [AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase )]
lowerCamelCase__ = tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , extra_ids=0 )
self.assertIn(__lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase__ = '''hello world'''
if self.space_between_special_tokens:
lowerCamelCase__ = '''[CLS] hello world [SEP]'''
else:
lowerCamelCase__ = input
lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.decode(__lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCAmelCase , [output, output.lower()] )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase__ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCamelCase__ = '''a'''
lowerCamelCase__ = ord(__lowerCAmelCase )
for attr in attributes_list:
setattr(__lowerCAmelCase , attr + '''_id''' , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , attr + '''_id''' ) , __lowerCAmelCase )
setattr(__lowerCAmelCase , attr + '''_id''' , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , attr + '''_id''' ) , __lowerCAmelCase )
setattr(__lowerCAmelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(__lowerCAmelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(__lowerCAmelCase , '''additional_special_tokens_ids''' ) , [] )
lowerCamelCase__ = 0xE_0_0_6
lowerCamelCase__ = chr(__lowerCAmelCase )
setattr(__lowerCAmelCase , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCAmelCase , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCAmelCase , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
| 29 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
def wrapper(*__snake_case ,**__snake_case ):
lowerCamelCase__ = timeit.default_timer()
lowerCamelCase__ = func(*__snake_case ,**__snake_case )
lowerCamelCase__ = timeit.default_timer() - starttime
return delta
lowerCamelCase__ = func.__name__
return wrapper
def lowerCAmelCase__(__snake_case ,__snake_case=100 ,__snake_case=None ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = seq_shapes or {}
for i in range(__snake_case ):
lowerCamelCase__ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__snake_case ,_ArrayXD ):
lowerCamelCase__ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__snake_case ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase__ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase__ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(__snake_case ,datasets.Sequence ):
while isinstance(__snake_case ,datasets.Sequence ):
lowerCamelCase__ = v.feature
lowerCamelCase__ = seq_shapes[k]
lowerCamelCase__ = np.random.rand(*__snake_case ).astype(v.dtype )
lowerCamelCase__ = data
dummy_data.append((i, example) )
return dummy_data
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=100 ,__snake_case=None ) -> str:
'''simple docstring'''
lowerCamelCase__ = generate_examples(__snake_case ,num_examples=__snake_case ,seq_shapes=__snake_case )
with ArrowWriter(features=__snake_case ,path=__snake_case ) as writer:
for key, record in dummy_data:
lowerCamelCase__ = features.encode_example(__snake_case )
writer.write(__snake_case )
lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
lowerCamelCase__ = datasets.Dataset.from_file(filename=__snake_case ,info=datasets.DatasetInfo(features=__snake_case ) )
return dataset
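# --- Usage sketch (added; not part of the original benchmark) ---
# A minimal sketch, assuming the two helpers above carry their upstream names
# `generate_examples` (confirmed by the internal call) and
# `generate_example_dataset` (an assumption here), with the argument order
# (dataset_path, features, num_examples, seq_shapes):
def _usage_sketch():
    features = datasets.Features(
        {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
    )
    dataset = generate_example_dataset(
        "/tmp/bench.arrow", features, num_examples=10, seq_shapes={"vec": (8,)}
    )
    print(dataset)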
| 29 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_a = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ = _TestCommandArgs(dataset=__snake_case ,all_configs=__snake_case ,save_infos=__snake_case )
lowerCamelCase__ = TestCommand(*__snake_case )
test_command.run()
lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
assert os.path.exists(__snake_case )
lowerCamelCase__ = DatasetInfosDict.from_directory(__snake_case )
lowerCamelCase__ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2351563,
'''num_examples''': 10000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238418,
'''num_examples''': 1000,
},
] ,download_size=3940680 ,dataset_size=2589981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase__ , lowerCamelCase__ = getattr(dataset_infos['''default'''] ,__snake_case ), getattr(expected_dataset_infos['''default'''] ,__snake_case )
if key == "num_bytes":
assert is_apercent_close(__snake_case ,__snake_case )
elif key == "splits":
assert list(__snake_case ) == list(__snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
            assert result == expected
| 29 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """decision_transformer"""
lowerCAmelCase_ = ["""past_key_values"""]
lowerCAmelCase_ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __lowerCAmelCase=1_7 , __lowerCAmelCase=4 , __lowerCAmelCase=1_2_8 , __lowerCAmelCase=4_0_9_6 , __lowerCAmelCase=True , __lowerCAmelCase=1 , __lowerCAmelCase=1_0_2_4 , __lowerCAmelCase=3 , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase="relu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1E-5 , __lowerCAmelCase=0.02 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=5_0_2_5_6 , __lowerCAmelCase=5_0_2_5_6 , __lowerCAmelCase=False , __lowerCAmelCase=False , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = state_dim
lowerCamelCase__ = act_dim
lowerCamelCase__ = hidden_size
lowerCamelCase__ = max_ep_len
lowerCamelCase__ = action_tanh
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scale_attn_weights
lowerCamelCase__ = use_cache
lowerCamelCase__ = scale_attn_by_inverse_layer_idx
lowerCamelCase__ = reorder_and_upcast_attn
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
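# --- Usage sketch (added) ---
# A minimal sketch, assuming this class is the upstream
# `DecisionTransformerConfig` (the definition name above is mangled):
# environment-specific dimensions are passed at construction time, and
# `attribute_map` lets the GPT-2-style fields be read under common names.
def _usage_sketch():
    config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
    print(config.num_hidden_layers)  # resolved to `n_layer` via attribute_map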
| 29 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = 1_3
lowerCamelCase__ = 7
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 9_9
lowerCamelCase__ = 3_2
lowerCamelCase__ = 2
lowerCamelCase__ = 4
lowerCamelCase__ = 3_7
lowerCamelCase__ = '''gelu'''
lowerCamelCase__ = 0.1
lowerCamelCase__ = 0.1
lowerCamelCase__ = 5_1_2
lowerCamelCase__ = 1_6
lowerCamelCase__ = 2
lowerCamelCase__ = 0.02
lowerCamelCase__ = 3
lowerCamelCase__ = 4
lowerCamelCase__ = None
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = self.prepare_config_and_inputs()
lowerCamelCase__ = True
lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = True
lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
lowerCamelCase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase )
# Also check the case where encoder outputs are not passed
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase )
lowerCamelCase__ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase )
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFEsmModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(__lowerCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase__ = model.get_bias()
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for k, v in name.items():
assert isinstance(__lowerCAmelCase , tf.Variable )
else:
lowerCamelCase__ = model.get_output_embeddings()
assert x is None
lowerCamelCase__ = model.get_bias()
assert name is None
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ = model(__lowerCAmelCase )[0]
lowerCamelCase__ = [1, 6, 3_3]
self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase )
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
lowerCamelCase__ = model(__lowerCAmelCase )[0]
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 29 |
from math import sqrt
def lowerCAmelCase__(__snake_case ) -> bool:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' must been an int and positive"
lowerCamelCase__ = True
    # 0 and 1 are not primes.
if number <= 1:
lowerCamelCase__ = False
for divisor in range(2 ,int(round(sqrt(__snake_case ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
lowerCamelCase__ = False
break
# precondition
assert isinstance(__snake_case ,__snake_case ), "'status' must been from type bool"
return status
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCamelCase__ = list(range(2 ,n + 1 ) )
    lowerCamelCase__ = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 ,len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCamelCase__ = 0
# filters actual prime numbers.
lowerCamelCase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2"
lowerCamelCase__ = []
    # iterates over all numbers from 2 up to N (inclusive);
    # if a number is prime it is appended to the list 'ans'
for number in range(2 ,n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and number >= 0, "'number' must been an int and >= 0"
    lowerCamelCase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowerCamelCase__ = 2
lowerCamelCase__ = number
if number == 0 or number == 1:
ans.append(__snake_case )
    # if 'number' is not prime then build the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCamelCase__ = 0
# prime factorization of 'number'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = max(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type int"
return ans
def lowerCAmelCase__(__snake_case ) -> Dict:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCamelCase__ = 0
# prime factorization of 'number'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = min(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type int"
return ans
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0 ,__snake_case ), "compare bust been from type bool"
return number % 2 == 0
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ), "'number' must been an int"
    assert isinstance(number % 2 != 0 ,__snake_case ), "compare must been from type bool"
return number % 2 != 0
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
    lowerCamelCase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowerCamelCase__ = get_prime_numbers(__snake_case )
lowerCamelCase__ = len(__snake_case )
# run variable for while-loops.
lowerCamelCase__ = 0
lowerCamelCase__ = None
    # exit variable, for breaking out of the loops
lowerCamelCase__ = True
while i < len_pn and loop:
lowerCamelCase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCamelCase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCAmelCase__(__snake_case ,__snake_case ) -> str:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCamelCase__ = 0
while numbera != 0:
lowerCamelCase__ = numbera % numbera
lowerCamelCase__ = numbera
lowerCamelCase__ = rest
# precondition
assert isinstance(__snake_case ,__snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Any:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
    lowerCamelCase__ = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCamelCase__ = prime_factorization(__snake_case )
lowerCamelCase__ = prime_factorization(__snake_case )
elif numbera == 1 or numbera == 1:
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = max(__snake_case ,__snake_case )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
    lowerCamelCase__ = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(max(__snake_case ,__snake_case ) ):
ans *= n
else:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCamelCase__ = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# precondition
assert isinstance(__snake_case ,__snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'number' must been a positive int"
lowerCamelCase__ = 0
lowerCamelCase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if 'ans' is not prime then
        # advance to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case ,__snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCamelCase__ = p_number_a + 1 # jump to the next number
    lowerCamelCase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 1), "'n' must been int and >= 1"
lowerCamelCase__ = [] # will be returned.
for divisor in range(1 ,n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
    assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisors(...)"
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCamelCase__ = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
    # compute the greatest common divisor of numerator and denominator.
lowerCamelCase__ = gcd(abs(__snake_case ) ,abs(__snake_case ) )
# precondition
assert (
isinstance(__snake_case ,__snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase__(__snake_case ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been a int and >= 0"
    lowerCamelCase__ = 1 # this will be returned.
for factor in range(1 ,n + 1 ):
ans *= factor
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been an int and >= 0"
lowerCamelCase__ = 0
lowerCamelCase__ = 1
    lowerCamelCase__ = 1 # this will be returned
for _ in range(n - 1 ):
lowerCamelCase__ = ans
ans += fiba
lowerCamelCase__ = tmp
return ans
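# --- Usage sketch (added) ---
# The internal call sites above pin down the intended public names of several
# helpers (`is_prime`, `prime_factorization`, `gcd`, `get_divisors`); a quick
# demonstration under that assumption:
def _usage_sketch():
    print(is_prime(97))              # True
    print(prime_factorization(360))  # [2, 2, 2, 3, 3, 5]
    print(gcd(54, 24))               # 6
    print(get_divisors(28))          # [1, 2, 4, 7, 14, 28]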
| 29 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_a = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_a = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_a = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=False ):
'''simple docstring'''
if rouge_types is None:
lowerCamelCase__ = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
lowerCamelCase__ = rouge_scorer.RougeScorer(rouge_types=__lowerCAmelCase , use_stemmer=__lowerCAmelCase )
if use_aggregator:
lowerCamelCase__ = scoring.BootstrapAggregator()
else:
lowerCamelCase__ = []
for ref, pred in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = scorer.score(__lowerCAmelCase , __lowerCAmelCase )
if use_aggregator:
aggregator.add_scores(__lowerCAmelCase )
else:
scores.append(__lowerCAmelCase )
if use_aggregator:
lowerCamelCase__ = aggregator.aggregate()
else:
lowerCamelCase__ = {}
for key in scores[0]:
lowerCamelCase__ = [score[key] for score in scores]
return result
| 29 |
from __future__ import annotations
def lowerCAmelCase__(__snake_case ,__snake_case = None ,__snake_case = None ) -> None:
'''simple docstring'''
if start is None:
lowerCamelCase__ = 0
if end is None:
lowerCamelCase__ = len(__snake_case ) - 1
if start >= end:
return
lowerCamelCase__ = (start + end) // 2
slowsort(__snake_case ,__snake_case ,__snake_case )
slowsort(__snake_case ,mid + 1 ,__snake_case )
if sequence[end] < sequence[mid]:
lowerCamelCase__ , lowerCamelCase__ = sequence[mid], sequence[end]
slowsort(__snake_case ,__snake_case ,end - 1 )
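# --- Usage sketch (added) ---
# `slowsort` is confirmed as the intended name by the recursive calls above.
# It sorts the sequence in place; it is a deliberately inefficient
# "multiply and surrender" teaching algorithm:
def _usage_sketch():
    seq = [5, 2, 4, 1, 3]
    slowsort(seq)
    print(seq)  # [1, 2, 3, 4, 5]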
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 | 1 |
from __future__ import annotations
import math
def lowerCAmelCase__(__snake_case ,__snake_case ) -> list:
'''simple docstring'''
if len(__snake_case ) != 2 or len(a[0] ) != 2 or len(__snake_case ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
lowerCamelCase__ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__snake_case ) )
]
def lowerCAmelCase__(__snake_case ,__snake_case ) -> str:
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__snake_case ) )
]
def lowerCAmelCase__(__snake_case ) -> tuple[list, list, list, list]:
'''simple docstring'''
if len(__snake_case ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
lowerCamelCase__ = len(__snake_case )
lowerCamelCase__ = matrix_length // 2
lowerCamelCase__ = [[a[i][j] for j in range(__snake_case ,__snake_case )] for i in range(__snake_case )]
lowerCamelCase__ = [
[a[i][j] for j in range(__snake_case ,__snake_case )] for i in range(__snake_case ,__snake_case )
]
lowerCamelCase__ = [[a[i][j] for j in range(__snake_case )] for i in range(__snake_case )]
lowerCamelCase__ = [[a[i][j] for j in range(__snake_case )] for i in range(__snake_case ,__snake_case )]
return top_left, top_right, bot_left, bot_right
def lowerCAmelCase__(__snake_case ) -> tuple[int, int]:
'''simple docstring'''
return len(__snake_case ), len(matrix[0] )
def lowerCAmelCase__(__snake_case ) -> None:
'''simple docstring'''
print('''\n'''.join(str(__snake_case ) for line in matrix ) )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> list:
'''simple docstring'''
if matrix_dimensions(__snake_case ) == (2, 2):
return default_matrix_multiplication(__snake_case ,__snake_case )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = split_matrix(__snake_case )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = split_matrix(__snake_case )
lowerCamelCase__ = actual_strassen(__snake_case ,matrix_subtraction(__snake_case ,__snake_case ) )
lowerCamelCase__ = actual_strassen(matrix_addition(__snake_case ,__snake_case ) ,__snake_case )
lowerCamelCase__ = actual_strassen(matrix_addition(__snake_case ,__snake_case ) ,__snake_case )
lowerCamelCase__ = actual_strassen(__snake_case ,matrix_subtraction(__snake_case ,__snake_case ) )
lowerCamelCase__ = actual_strassen(matrix_addition(__snake_case ,__snake_case ) ,matrix_addition(__snake_case ,__snake_case ) )
lowerCamelCase__ = actual_strassen(matrix_subtraction(__snake_case ,__snake_case ) ,matrix_addition(__snake_case ,__snake_case ) )
lowerCamelCase__ = actual_strassen(matrix_subtraction(__snake_case ,__snake_case ) ,matrix_addition(__snake_case ,__snake_case ) )
lowerCamelCase__ = matrix_addition(matrix_subtraction(matrix_addition(__snake_case ,__snake_case ) ,__snake_case ) ,__snake_case )
lowerCamelCase__ = matrix_addition(__snake_case ,__snake_case )
lowerCamelCase__ = matrix_addition(__snake_case ,__snake_case )
lowerCamelCase__ = matrix_subtraction(matrix_subtraction(matrix_addition(__snake_case ,__snake_case ) ,__snake_case ) ,__snake_case )
# construct the new matrix from our 4 quadrants
lowerCamelCase__ = []
for i in range(len(__snake_case ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__snake_case ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def lowerCAmelCase__(__snake_case ,__snake_case ) -> list:
'''simple docstring'''
if matrix_dimensions(__snake_case )[1] != matrix_dimensions(__snake_case )[0]:
lowerCamelCase__ = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F'Matrix A: {matrixa}\n'
F'Matrix B: {matrixa}'
)
raise Exception(__snake_case )
lowerCamelCase__ = matrix_dimensions(__snake_case )
lowerCamelCase__ = matrix_dimensions(__snake_case )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
lowerCamelCase__ = max(*__snake_case ,*__snake_case )
lowerCamelCase__ = int(math.pow(2 ,math.ceil(math.loga(__snake_case ) ) ) )
lowerCamelCase__ = matrixa
lowerCamelCase__ = matrixa
    # Adding zeros to the matrices so that the arrays' dimensions are the same
    # and also a power of 2
for i in range(0 ,__snake_case ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,__snake_case ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,__snake_case ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
lowerCamelCase__ = actual_strassen(__snake_case ,__snake_case )
# Removing the additional zeros
for i in range(0 ,__snake_case ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,__snake_case ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
_a = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
_a = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 29 |
from __future__ import annotations
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> float:
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float:
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float:
'''simple docstring'''
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
__snake_case ,nominal_annual_percentage_rate / 365 ,number_of_years * 365 )
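# --- Usage sketch (added) ---
# Only `compound_interest` is named by an internal call above; the other two
# names (`simple_interest`, `apr_interest`) are assumptions, and the argument
# order mirrors the confirmed (principal, rate, periods) call. Note these
# helpers return the interest earned, not the final balance:
def _usage_sketch():
    print(simple_interest(10_000, 0.0005, 30))  # 150.0 over 30 days
    print(compound_interest(10_000, 0.05, 3))   # 1576.25 over 3 periods
    print(apr_interest(10_000, 0.05, 1))        # ~512.67, daily compounding for 1 year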
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
lowerCamelCase__ = projection_dim
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFDPRModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
lowerCamelCase__ = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29 |
def lowerCAmelCase__(__snake_case ,__snake_case ) -> list:
'''simple docstring'''
lowerCamelCase__ = word.split()
def justify(__snake_case ,__snake_case ,__snake_case ) -> str:
lowerCamelCase__ = max_width - width
lowerCamelCase__ = len(__snake_case )
if len(__snake_case ) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count spaces for the remainder of the line
return line[0] + " " * overall_spaces_count
else:
lowerCamelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCamelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCamelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__snake_case ):
num_spaces_between_words_list[i] += 1
lowerCamelCase__ = []
for i in range(__snake_case ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__snake_case )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = 0
for word in words:
if width + len(__snake_case ) + len(__snake_case ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__snake_case )
width += len(__snake_case )
else:
# justify the line and add it to result
answer.append(justify(__snake_case ,__snake_case ,__snake_case ) )
# reset new line and new width
lowerCamelCase__ , lowerCamelCase__ = [word], len(__snake_case )
lowerCamelCase__ = max_width - width - len(__snake_case )
answer.append(''' '''.join(__snake_case ) + (remaining_spaces + 1) * ''' ''' )
return answer
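# --- Usage sketch (added) ---
# Assuming the top-level function is the upstream `text_justification` (its
# definition name is mangled), every line is fully justified except the last,
# which is left-justified and padded with trailing spaces:
def _usage_sketch():
    for line in text_justification("This is an example of text justification.", 16):
        print(repr(line))
    # 'This    is    an'
    # 'example  of text'
    # 'justification.  '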
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 |
def lowerCAmelCase__(__snake_case ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 ,len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase__ = grid[0]
for row_n in range(1 ,len(__snake_case ) ):
lowerCamelCase__ = grid[row_n]
lowerCamelCase__ = fill_row(__snake_case ,__snake_case )
lowerCamelCase__ = grid[row_n]
return grid[-1][-1]
def lowerCAmelCase__(__snake_case ,__snake_case ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 ,len(__snake_case ) ):
current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
return current_row
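# --- Usage sketch (added) ---
# Assuming the top-level function is the upstream `min_path_sum` (`fill_row`
# is confirmed by the internal call), it mutates the grid in place and
# returns the cheapest top-left to bottom-right cost moving only right/down:
def _usage_sketch():
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    print(min_path_sum(grid))  # 7  (path 1 -> 3 -> 1 -> 1 -> 1)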
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
lowerCAmelCase_ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
lowerCAmelCase_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.task_name.lower()
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """train"""
lowerCAmelCase_ = """dev"""
lowerCAmelCase_ = """test"""
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
'''simple docstring'''
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
lowerCamelCase__ = args
lowerCamelCase__ = glue_processors[args.task_name]()
lowerCamelCase__ = glue_output_modes[args.task_name]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
lowerCamelCase__ = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
lowerCamelCase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCamelCase__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
lowerCamelCase__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ = cached_features_file + '''.lock'''
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase__ = time.time()
lowerCamelCase__ = torch.load(__lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase__ = examples[:limit_length]
lowerCamelCase__ = glue_convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
lowerCamelCase__ = time.time()
torch.save(self.features , __lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , __lowerCAmelCase ):
'''simple docstring'''
return self.features[i]
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.label_list
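# --- Usage sketch (added) ---
# Assuming the classes above are the upstream `GlueDataTrainingArguments` and
# `GlueDataset` (the definition names here are mangled), the deprecated
# dataset wraps cached GLUE features behind a map-style torch Dataset:
#
# args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
# dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)
# print(len(dataset), dataset[0])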
| 29 | 1 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_a = logging.get_logger(__name__)
class __A :
'''simple docstring'''
lowerCAmelCase_ = None
@experimental
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
return _map_with_joblib(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = num_proc if num_proc <= len(__snake_case ) else len(__snake_case )
    lowerCamelCase__ = [] # We organize the splits ourselves (contiguous splits)
for index in range(__snake_case ):
lowerCamelCase__ = len(__snake_case ) // num_proc
lowerCamelCase__ = len(__snake_case ) % num_proc
lowerCamelCase__ = div * index + min(__snake_case ,__snake_case )
lowerCamelCase__ = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__snake_case ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F'Error dividing inputs iterable among processes. '
F'Total number of objects {len(__snake_case )}, '
F'length: {sum(len(i[1] ) for i in split_kwds )}' )
logger.info(
F'Spawning {num_proc} processes for {len(__snake_case )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
lowerCamelCase__ , lowerCamelCase__ = None, None
if not disable_tqdm:
lowerCamelCase__ , lowerCamelCase__ = (RLock(),), tqdm.set_lock
with Pool(__snake_case ,initargs=__snake_case ,initializer=__snake_case ) as pool:
lowerCamelCase__ = pool.map(__snake_case ,__snake_case )
logger.info(F'Finished {num_proc} processes' )
lowerCamelCase__ = [obj for proc_res in mapped for obj in proc_res]
logger.info(F'Unpacked {len(__snake_case )} objects' )
return mapped
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=__snake_case ):
return joblib.Parallel()(
joblib.delayed(__snake_case )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowerCAmelCase__(__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCamelCase__ = None
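# A small worked example of the contiguous-split arithmetic above (illustrative,
# not part of the original module): with 10 items and 3 processes, div = 3 and
# mod = 1, so the slices are [0:4], [4:7], [7:10] -- the first `mod` slices get
# one extra item and every slice stays contiguous.
def _contiguous_slices(num_items, num_proc):
    div, mod = divmod(num_items, num_proc)
    return [
        (div * index + min(index, mod), div * (index + 1) + min(index + 1, mod))
        for index in range(num_proc)
    ]
assert _contiguous_slices(10, 3) == [(0, 4), (4, 7), (7, 10)]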
| 29 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_a = datasets.logging.get_logger(__name__)
_a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotations for the word in columns separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite were added by @andreasvc.\nParsing of CoNLL files was developed by Leo Born.\n"
_a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ,__snake_case=False ,__snake_case=True ,__snake_case=False ,__snake_case="dummy_doc" ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = {doc: key_lines}
lowerCamelCase__ = {doc: sys_lines}
lowerCamelCase__ = {}
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,key_doc_lines[doc] ,__snake_case )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case )
lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,sys_doc_lines[doc] ,__snake_case )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case )
if remove_nested:
lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case )
lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case )
lowerCamelCase__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'''Number of resulting singleton clusters in the key '''
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'''files, respectively''' )
return doc_coref_infos
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ = get_coref_infos(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
lowerCamelCase__ = {}
lowerCamelCase__ = 0
lowerCamelCase__ = 0
for name, metric in metrics:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = evaluator.evaluate_documents(__snake_case ,__snake_case ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,)
if conll_subparts_num == 3:
lowerCamelCase__ = (conll / 3) * 100
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase__ = line.split()[5]
if not parse_col == "-":
lowerCamelCase__ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ):
'''simple docstring'''
lowerCamelCase__ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase__ = util.check_gold_parse_annotation(__lowerCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase__ = evaluate(
key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , )
return score
| 29 | 1 |
import string
from math import logaa
def lowerCAmelCase__(__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = document.translate(
str.maketrans('''''' ,'''''' ,string.punctuation ) ).replace('''\n''' ,'''''' )
lowerCamelCase__ = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> tuple[int, int]:
'''simple docstring'''
lowerCamelCase__ = corpus.lower().translate(
str.maketrans('''''' ,'''''' ,string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCamelCase__ = corpus_without_punctuation.split('''\n''' )
lowerCamelCase__ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__snake_case ))
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ) -> float:
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) ,3 )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> float:
'''simple docstring'''
return round(tf * idf ,3 )
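# A worked example of the tf-idf arithmetic implemented above (numbers are
# illustrative): a term appearing in 1 of 3 documents gets
# idf = round(log10(3 / 1), 3) = 0.477, and with a raw term frequency of 2 the
# combined score is round(2 * 0.477, 3) = 0.954.
from math import log10
assert round(log10(3 / 1), 3) == 0.477
assert round(2 * 0.477, 3) == 0.954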
| 29 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_a = open # noqa: we just need to have a builtin inside this module to test it properly
| 29 | 1 |
from __future__ import annotations
import math
def lowerCAmelCase__(__snake_case ,__snake_case ) -> float:
'''simple docstring'''
lowerCamelCase__ = u
for i in range(1 ,__snake_case ):
lowerCamelCase__ = temp * (u - i)
return temp
def lowerCAmelCase__() -> None:
'''simple docstring'''
    lowerCamelCase__ = int(input('''enter the number of values: ''' ) )
lowerCamelCase__ = []
for _ in range(__snake_case ):
y.append([] )
for i in range(__snake_case ):
for j in range(__snake_case ):
y[i].append(__snake_case )
lowerCamelCase__ = 0
print('''enter the values of parameters in a list: ''' )
lowerCamelCase__ = list(map(__snake_case ,input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(__snake_case ):
lowerCamelCase__ = float(input() )
lowerCamelCase__ = int(input('''enter the value to interpolate: ''' ) )
lowerCamelCase__ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 ,__snake_case ):
for j in range(n - i ):
lowerCamelCase__ = y[j + 1][i - 1] - y[j][i - 1]
lowerCamelCase__ = y[0][0]
for i in range(1 ,__snake_case ):
summ += (ucal(__snake_case ,__snake_case ) * y[0][i]) / math.factorial(__snake_case )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
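# A non-interactive sketch of the same Newton forward-difference interpolation
# (the data points here are illustrative, not from the original script): for
# f(x) = x**2 sampled at x = 0, 1, 2, 3 the interpolated value at x = 1.5 is 2.25.
import math
def _newton_forward(x, y, value):
    n = len(x)
    table = [[0.0] * n for _ in range(n)]  # forward-difference table
    for i in range(n):
        table[i][0] = y[i]
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = table[0][0], 1.0
    for i in range(1, n):
        u_term *= u - (i - 1)  # accumulates u(u-1)...(u-i+1)
        total += u_term * table[0][i] / math.factorial(i)
    return total
assert abs(_newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5) - 2.25) < 1e-9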
| 29 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_a = logging.get_logger(__name__)
class __A :
'''simple docstring'''
lowerCAmelCase_ = None
@experimental
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
return _map_with_joblib(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = num_proc if num_proc <= len(__snake_case ) else len(__snake_case )
    lowerCamelCase__ = [] # We organize the splits ourselves (contiguous splits)
for index in range(__snake_case ):
lowerCamelCase__ = len(__snake_case ) // num_proc
lowerCamelCase__ = len(__snake_case ) % num_proc
lowerCamelCase__ = div * index + min(__snake_case ,__snake_case )
lowerCamelCase__ = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__snake_case ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F'Error dividing inputs iterable among processes. '
F'Total number of objects {len(__snake_case )}, '
F'length: {sum(len(i[1] ) for i in split_kwds )}' )
logger.info(
F'Spawning {num_proc} processes for {len(__snake_case )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
lowerCamelCase__ , lowerCamelCase__ = None, None
if not disable_tqdm:
lowerCamelCase__ , lowerCamelCase__ = (RLock(),), tqdm.set_lock
with Pool(__snake_case ,initargs=__snake_case ,initializer=__snake_case ) as pool:
lowerCamelCase__ = pool.map(__snake_case ,__snake_case )
logger.info(F'Finished {num_proc} processes' )
lowerCamelCase__ = [obj for proc_res in mapped for obj in proc_res]
logger.info(F'Unpacked {len(__snake_case )} objects' )
return mapped
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=__snake_case ):
return joblib.Parallel()(
joblib.delayed(__snake_case )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowerCAmelCase__(__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCamelCase__ = None
| 29 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
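# How this lazy-import pattern works (explanatory note, not part of the module):
# at import time the package module is swapped for a _LazyModule that only holds
# _import_structure, and the heavy torch-backed classes listed under
# TYPE_CHECKING are imported on first attribute access, keeping the top-level
# import cheap even when the optional backend is installed.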
| 29 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 42
class __A ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 8_8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = "geglu" , __lowerCAmelCase = True , __lowerCAmelCase = True , ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = attention_head_dim
lowerCamelCase__ = num_attention_heads * attention_head_dim
lowerCamelCase__ = in_channels
lowerCamelCase__ = torch.nn.GroupNorm(num_groups=__lowerCAmelCase , num_channels=__lowerCAmelCase , eps=1E-6 , affine=__lowerCAmelCase )
lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
# 3. Define transformers blocks
lowerCamelCase__ = nn.ModuleList(
[
BasicTransformerBlock(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dropout=__lowerCAmelCase , cross_attention_dim=__lowerCAmelCase , activation_fn=__lowerCAmelCase , attention_bias=__lowerCAmelCase , double_self_attention=__lowerCAmelCase , norm_elementwise_affine=__lowerCAmelCase , )
for d in range(__lowerCAmelCase )
] )
lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase = True , ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = hidden_states.shape
lowerCamelCase__ = batch_frames // num_frames
lowerCamelCase__ = hidden_states
lowerCamelCase__ = hidden_states[None, :].reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowerCamelCase__ = self.norm(__lowerCAmelCase )
lowerCamelCase__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = self.proj_in(__lowerCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowerCamelCase__ = block(
__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , timestep=__lowerCAmelCase , cross_attention_kwargs=__lowerCAmelCase , class_labels=__lowerCAmelCase , )
# 3. Output
lowerCamelCase__ = self.proj_out(__lowerCAmelCase )
lowerCamelCase__ = (
hidden_states[None, None, :]
.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowerCamelCase__ = hidden_states.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=__lowerCAmelCase )
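# Shape walkthrough for the forward pass above (a reading aid, not part of the
# original module): the input arrives as (batch * frames, channels, height, width);
# it is unflattened to (batch, frames, channels, height, width), normalized, then
# permuted and reshaped to (batch * height * width, frames, channels) so that each
# spatial position attends over the frame axis in the transformer blocks; finally
# it is projected back, reshaped to (batch * frames, channels, height, width), and
# added to the residual.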
| 29 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 |
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_a = [{"type": "code", "content": INSTALL_CONTENT}]
_a = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 29 | 1 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
if hor == 128:
lowerCamelCase__ = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
lowerCamelCase__ = (32, 128, 256)
lowerCamelCase__ = ('''UpResnetBlock1D''', '''UpResnetBlock1D''')
elif hor == 32:
lowerCamelCase__ = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
lowerCamelCase__ = (32, 64, 128, 256)
lowerCamelCase__ = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''')
lowerCamelCase__ = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {
'''down_block_types''': down_block_types,
'''block_out_channels''': block_out_channels,
'''up_block_types''': up_block_types,
'''layers_per_block''': 1,
'''use_timestep_embedding''': True,
'''out_block_type''': '''OutConv1DBlock''',
'''norm_num_groups''': 8,
'''downsample_each_block''': False,
'''in_channels''': 14,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''sample_size''': 65536,
'''mid_block_type''': '''MidResTemporalBlock1D''',
'''act_fn''': '''mish''',
}
lowerCamelCase__ = UNetaDModel(**__snake_case )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
lowerCamelCase__ = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__snake_case )
hf_value_function.load_state_dict(__snake_case )
torch.save(hf_value_function.state_dict() ,F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' ,'''w''' ) as f:
json.dump(__snake_case ,__snake_case )
def lowerCAmelCase__() -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {
'''in_channels''': 14,
'''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''),
'''up_block_types''': (),
'''out_block_type''': '''ValueFunction''',
'''mid_block_type''': '''ValueFunctionMidBlock1D''',
'''block_out_channels''': (32, 64, 128, 256),
'''layers_per_block''': 1,
'''downsample_each_block''': True,
'''sample_size''': 65536,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''use_timestep_embedding''': True,
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''norm_num_groups''': 8,
'''act_fn''': '''mish''',
}
lowerCamelCase__ = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
lowerCamelCase__ = model
lowerCamelCase__ = UNetaDModel(**__snake_case )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
lowerCamelCase__ = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__snake_case )
hf_value_function.load_state_dict(__snake_case )
torch.save(hf_value_function.state_dict() ,'''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' ,'''w''' ) as f:
json.dump(__snake_case ,__snake_case )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
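# Note on the key mapping above (an observation, not from the original script):
# dict(zip(old_state_dict.keys(), hf_model.state_dict().keys())) pairs
# parameters purely by iteration order, so it only works because both state
# dicts are insertion-ordered and enumerate corresponding layers in the same
# sequence; any reordering would silently map weights to the wrong modules.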
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_a = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case = None ,__snake_case = None ,__snake_case = None ,) -> Tuple:
'''simple docstring'''
if config_name_or_path is None:
lowerCamelCase__ = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
if generator_tokenizer_name_or_path is None:
lowerCamelCase__ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase__ = question_encoder_name_or_path
lowerCamelCase__ = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
# Save model.
lowerCamelCase__ = RagConfig.from_pretrained(__snake_case )
lowerCamelCase__ = AutoConfig.from_pretrained(__snake_case )
lowerCamelCase__ = AutoConfig.from_pretrained(__snake_case )
lowerCamelCase__ = gen_config
lowerCamelCase__ = question_encoder_config
lowerCamelCase__ = model_class.from_pretrained_question_encoder_generator(
__snake_case ,__snake_case ,config=__snake_case )
rag_model.save_pretrained(__snake_case )
# Sanity check.
model_class.from_pretrained(__snake_case )
# Save tokenizers.
lowerCamelCase__ = AutoTokenizer.from_pretrained(__snake_case )
gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
lowerCamelCase__ = AutoTokenizer.from_pretrained(__snake_case )
question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
_a = parser.parse_args()
_a = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
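# Illustrative invocation of this consolidation script (the model identifiers
# and destination path are examples, not mandated by the script itself):
# python consolidate_rag_checkpoint.py \
#     --model_type rag_sequence \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-sequence-checkpoint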
| 29 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
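# Deprecation-shim pattern (explanatory note, not part of the module): the old
# OwlViTFeatureExtractor name stays importable, a deprecation warning is emitted
# on construction, and all behaviour is inherited unchanged from
# OwlViTImageProcessor until the alias is finally removed.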
| 29 | 1 |