| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
A_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__ (snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ):
"""simple docstring"""
_snake_case : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
_snake_case : List[str] = 1
_snake_case : Union[str, Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
_snake_case : List[Any] = init[0]
_snake_case : str = init[1]
_snake_case : int = 0
_snake_case : Optional[int] = g + heuristic[x][y] # cost from starting cell to destination cell
_snake_case : int = [[f, g, x, y]]
_snake_case : Union[str, Any] = False # flag that is set when search is complete
_snake_case : int = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
_snake_case : Dict = cell.pop()
_snake_case : str = next_cell[2]
_snake_case : List[str] = next_cell[3]
_snake_case : List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
_snake_case : Optional[int] = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
_snake_case : Dict = x + DIRECTIONS[i][0]
_snake_case : Dict = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
_snake_case : List[str] = g + cost
_snake_case : str = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
_snake_case : str = 1
_snake_case : Any = i
_snake_case : List[Any] = []
_snake_case : List[str] = goal[0]
_snake_case : List[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
_snake_case : Any = x - DIRECTIONS[action[x][y]][0]
_snake_case : List[Any] = y - DIRECTIONS[action[x][y]][1]
_snake_case : Optional[int] = xa
_snake_case : int = ya
invpath.append([x, y] )
_snake_case : int = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
A_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
A_ = [0, 0]
# all coordinates are given in format [y,x]
A_ = [len(grid) - 1, len(grid[0]) - 1]
A_ = 1
# the cost map which pushes the path closer to the goal
A_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
A_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
A_ = 99
A_ , A_ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 28 |
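Sorting and reversing the whole open list on every iteration, as `search` above does, costs O(n log n) per pop; a binary heap gives the same "cheapest cell first" behaviour in O(log n). A minimal sketch under the same grid conventions — the `a_star_cost` name is ours, and it returns only the path cost, not the action map:

import heapq


def a_star_cost(grid, init, goal, cost, heuristic):
    """Cheapest path cost from init to goal; a heap replaces the sorted list."""
    closed = {tuple(init)}
    # entries are (f, g, x, y): heapq always pops the smallest f first
    open_set = [(heuristic[init[0]][init[1]], 0, init[0], init[1])]
    while open_set:
        _f, g, x, y = heapq.heappop(open_set)
        if [x, y] == goal:
            return g
        for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            x2, y2 = x + dx, y + dy
            if 0 <= x2 < len(grid) and 0 <= y2 < len(grid[0]):
                if (x2, y2) not in closed and grid[x2][y2] == 0:
                    closed.add((x2, y2))
                    heapq.heappush(open_set, (g + cost + heuristic[x2][y2], g + cost, x2, y2))
    raise ValueError("Algorithm is unable to find solution")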
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a_, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a_, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case : Union[str, Any] = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a_, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" )
self.assertEqual(a_, """\nCONSTANT = None\n""" )
_snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" )
self.assertEqual(
a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""], a_ )
| 28 | 1 |
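The assertions above pin down `find_backend`'s contract: an `if not is_xxx_available():` guard maps to the backend name, and multiple guards are joined with `_and_`. A minimal re-implementation sketch of that contract — the real parser lives in `utils/check_dummies.py` and may differ in details:

import re

_backend_re = re.compile(r"is\_([a-z_]*)_available\(\)")


def find_backend_sketch(line: str):
    """Return None for ordinary lines, else the backend(s) guarded by the line."""
    if line.strip().startswith("if not") and "_available" in line:
        backends = _backend_re.findall(line)
        return "_and_".join(backends) if backends else None
    return None


assert find_backend_sketch("    if not is_torch_available():") == "torch"
assert (
    find_backend_sketch("    if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)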
"""simple docstring"""
from math import ceil
def UpperCAmelCase__ (snake_case__ : int = 10_01 ):
"""simple docstring"""
_snake_case : Any = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
_snake_case : int = 2 * i + 1
_snake_case : List[str] = 2 * i
_snake_case : List[Any] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 28 |
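The closed form works because ring i of the spiral has corners (2i+1)^2, (2i+1)^2 - 2i, (2i+1)^2 - 4i and (2i+1)^2 - 6i, which sum to 4*(2i+1)^2 - 12i, i.e. `4 * odd**2 - 6 * even`. The sketch below cross-checks it by walking the spiral corner by corner (the helper name is ours):

def spiral_diagonal_sum_bruteforce(n: int) -> int:
    """Walk the spiral ring by ring and add up its four corners."""
    assert n % 2 == 1, "the spiral has a single centre cell only for odd n"
    total = 1  # the centre cell
    corner = 1
    for ring in range(1, (n + 1) // 2):
        step = 2 * ring  # gap between consecutive corners on this ring
        for _ in range(4):  # the four corners, visited in spiral order
            corner += step
            total += corner
    return total


# agrees with the closed form, e.g. for the 5 x 5 spiral: 1+3+5+7+9+13+17+21+25
assert spiral_diagonal_sum_bruteforce(5) == 101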
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 28 | 1 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
A_ = logging.get_logger(__name__)
A_ = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase__ (snake_case__ : List[DatasetType] , snake_case__ : Optional[List[float]] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[DatasetInfo] = None , snake_case__ : Optional[NamedSplit] = None , snake_case__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(snake_case__ ):
if not isinstance(snake_case__ , (Dataset, IterableDataset) ):
if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
"""is an empty dataset dictionary.""" )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(snake_case__ )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(snake_case__ ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}." )
if i == 0:
_snake_case , _snake_case : int = (
(Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ )
else:
return _interleave_iterable_datasets(
snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ )
def UpperCAmelCase__ (snake_case__ : List[DatasetType] , snake_case__ : Optional[DatasetInfo] = None , snake_case__ : Optional[NamedSplit] = None , snake_case__ : int = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(snake_case__ ):
if not isinstance(snake_case__ , (Dataset, IterableDataset) ):
if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
"""is an empty dataset dictionary.""" )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(snake_case__ )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(snake_case__ ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}." )
if i == 0:
_snake_case , _snake_case : Dict = (
(Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ )
else:
return _concatenate_iterable_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ )
| 28 |
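A minimal usage sketch of the two entry points above, with toy column values and assuming a standard `datasets` install:

from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

# without probabilities the sources are alternated example by example
mixed = interleave_datasets([d1, d2])
print(mixed["a"])  # [0, 10, 1, 11, 2, 12]

# concatenation stacks rows (axis=0) or columns (axis=1)
stacked = concatenate_datasets([d1, d2])
print(stacked["a"])  # [0, 1, 2, 10, 11, 12]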
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A_ = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = "TensorFlow"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 1 |
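The `_measure_speed` method above follows the advice cited from the `timeit` docs: take the minimum over repeats rather than the mean, since slower runs reflect interference from other processes rather than the measured code. The same pattern in isolation (the helper name is ours):

import timeit


def measure_speed(func, repeat: int = 3, number: int = 10) -> float:
    """Return the best per-call time over `repeat` trials of `number` calls each."""
    runtimes = timeit.repeat(func, repeat=repeat, number=number)
    return min(runtimes) / number


if __name__ == "__main__":
    print(measure_speed(lambda: sum(range(10_000))))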
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : str = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
_snake_case : List[Any] = DatasetInfosDict.from_directory(snake_case__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : DatasetInfo ):
"""simple docstring"""
_snake_case : str = str(snake_case__ )
dataset_info.write_to_directory(snake_case__ )
_snake_case : Union[str, Any] = DatasetInfo.from_directory(snake_case__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(snake_case__ , """dataset_info.json""" ) )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
_snake_case : List[str] = dataset_info._to_yaml_dict()
assert sorted(snake_case__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_snake_case : Dict = yaml.safe_dump(snake_case__ )
_snake_case : Any = yaml.safe_load(snake_case__ )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Tuple = DatasetInfo()
_snake_case : Union[str, Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : DatasetInfosDict ):
"""simple docstring"""
_snake_case : Dict = str(snake_case__ )
dataset_infos_dict.write_to_directory(snake_case__ )
_snake_case : Union[str, Any] = DatasetInfosDict.from_directory(snake_case__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_snake_case : Dict = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_snake_case : List[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(snake_case__ , """README.md""" ) )
| 28 |
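The YAML round-trip test above leans on `_to_yaml_dict()` restricting itself to plain lists, dicts, ints and strings, which survive PyYAML's safe dumper losslessly. The invariant in miniature:

import yaml

metadata = {"dataset_size": 42, "splits": [{"name": "train", "num_examples": 42}]}
# safe_dump/safe_load round-trips plain Python data without loss
assert yaml.safe_load(yaml.safe_dump(metadata)) == metadata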
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = int(snake_case__ )
# Initialize Result
_snake_case : str = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
answer.append(snake_case__ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 | 1 |
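Note that the greedy strategy above is only optimal for canonical coin systems such as Indian currency; for arbitrary denominations it can overshoot, which the function itself demonstrates:

# three coins instead of the optimal two ([3, 3]):
print(find_minimum_change([1, 3, 4], "6"))  # [4, 1, 1]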
"""simple docstring"""
import pprint
import requests
A_ = '''https://zenquotes.io/api'''
def UpperCAmelCase__ ():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def UpperCAmelCase__ ():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
| 28 |
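Production callers would normally bound these requests; a sketch reusing `API_ENDPOINT_URL` from above, with a timeout and an HTTP status check (the helper name is ours):

import requests


def random_quote_safe(timeout: float = 10.0):
    """Like random_quotes() above, but raises instead of hanging on a slow or failing endpoint."""
    response = requests.get(API_ENDPOINT_URL + "/random", timeout=timeout)
    response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
    return response.json()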
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    # We will verify our results on an image of cute cats
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
| 28 | 1 |
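For reference, the checkpoint those integration tests exercise can be run directly. A minimal inference sketch using only classes and paths that appear in the tests above (the model is downloaded on first use):

import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# class index 281 ("tabby, tabby cat") is the value the integration test expects
print(model.config.id2label[logits.argmax(-1).item()])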
"""simple docstring"""
import math
from collections.abc import Callable
def UpperCAmelCase__ (snake_case__ : Callable[[float], float] , snake_case__ : float , snake_case__ : float ):
"""simple docstring"""
_snake_case : float = xa
_snake_case : float = xa
while True:
if x_n == x_na or function(snake_case__ ) == function(snake_case__ ):
raise ZeroDivisionError("""float division by zero, could not find root""" )
_snake_case : float = x_na - (
function(snake_case__ ) / ((function(snake_case__ ) - function(snake_case__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
_snake_case : int = x_na
_snake_case : List[Any] = x_na
def UpperCAmelCase__ (snake_case__ : float ):
"""simple docstring"""
return math.pow(snake_case__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 28 |
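The loop above is the secant method with a 1e-5 step tolerance; a quick usage check, assuming `intersection` from above is in scope:

import math

# the positive root of x**2 - 2 is sqrt(2)
root = intersection(lambda x: x * x - 2, 1.0, 2.0)
assert abs(root - math.sqrt(2)) < 1e-4
print(root)  # ~1.41421...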
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase( __a ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 28 | 1 |
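The save/reload round-trip those scheduler tests exercise can be shown in a few lines; a sketch assuming a standard `diffusers` install, with the constructor argument mirroring the test config:

import tempfile

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)  # writes scheduler_config.json
    reloaded = IPNDMScheduler.from_pretrained(tmpdirname)

# a reloaded scheduler reproduces the same timestep schedule
reloaded.set_timesteps(10)
assert torch.equal(reloaded.timesteps, scheduler.timesteps)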
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
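        # 196 = (224 // 16) ** 2, i.e. every patch of the 224x224 input is
        # masked for this check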
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
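        # without `target_sizes` the maps keep the raw logit resolution of
        # (160, 160); with it they are rescaled to the requested (500, 300)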
| 28 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
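    # e.g. 5 = 6*1 - 1, 7 = 6*1 + 1, 11 = 6*2 - 1, 13 = 6*2 + 1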
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums(n: int ):
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
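    # Illustrative cross-check (added for clarity; relies only on the helpers
    # above): 5777 = 53 * 109 is the smallest odd composite with no
    # prime + 2*k**2 representation, so it must be the first number found.
    assert compute_nums(1 ) == [5_777]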
| 28 | 1 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
A_ = '''true'''
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Any=82 , snake_case__ : Optional[int]=16 ):
"""simple docstring"""
set_seed(42 )
_snake_case : Tuple = RegressionModel()
_snake_case : Union[str, Any] = deepcopy(snake_case__ )
_snake_case : Any = RegressionDataset(length=snake_case__ )
_snake_case : Optional[int] = DataLoader(snake_case__ , batch_size=snake_case__ )
model.to(accelerator.device )
_snake_case , _snake_case : str = accelerator.prepare(snake_case__ , snake_case__ )
return model, ddp_model, dataloader
def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : Union[str, Any]=False ):
"""simple docstring"""
_snake_case : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
_snake_case : Tuple = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(snake_case__ : int ):
_snake_case : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
with accelerator.main_process_first():
_snake_case : List[Any] = dataset.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
_snake_case : List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : int ):
if use_longest:
return tokenizer.pad(snake_case__ , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(snake_case__ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return DataLoader(snake_case__ , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=16 )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = Accelerator(dispatch_batches=snake_case__ , split_batches=snake_case__ )
_snake_case : Dict = get_dataloader(snake_case__ , not dispatch_batches )
_snake_case : Tuple = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=snake_case__ )
_snake_case , _snake_case : Tuple = accelerator.prepare(snake_case__ , snake_case__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : Union[str, Any] = []
for batch in dataloader:
_snake_case , _snake_case : List[str] = batch.values()
with torch.no_grad():
_snake_case : List[Any] = model(snake_case__ )
_snake_case , _snake_case : List[str] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
_snake_case , _snake_case : Optional[int] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case__ )
targs.append(snake_case__ )
_snake_case , _snake_case : Dict = torch.cat(snake_case__ ), torch.cat(snake_case__ )
return logits, targs
def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : Optional[int]=82 , snake_case__ : Optional[Any]=False , snake_case__ : List[Any]=False , snake_case__ : List[Any]=16 ):
"""simple docstring"""
_snake_case , _snake_case , _snake_case : int = get_basic_setup(snake_case__ , snake_case__ , snake_case__ )
_snake_case , _snake_case : str = generate_predictions(snake_case__ , snake_case__ , snake_case__ )
assert (
len(snake_case__ ) == num_samples
), F"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case__ )}"
def UpperCAmelCase__ (snake_case__ : bool = False , snake_case__ : bool = False ):
"""simple docstring"""
_snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" )
_snake_case , _snake_case : Dict = get_mrpc_setup(snake_case__ , snake_case__ )
# First do baseline
_snake_case , _snake_case , _snake_case : Any = setup["""no"""]
model.to(snake_case__ )
model.eval()
for batch in dataloader:
batch.to(snake_case__ )
with torch.inference_mode():
_snake_case : int = model(**snake_case__ )
_snake_case : str = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case__ , references=batch["""labels"""] )
_snake_case : Optional[Any] = metric.compute()
# Then do distributed
_snake_case , _snake_case , _snake_case : List[Any] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
_snake_case : Optional[int] = model(**snake_case__ )
_snake_case : List[str] = outputs.logits.argmax(dim=-1 )
_snake_case : List[Any] = batch["""labels"""]
_snake_case , _snake_case : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case__ , references=snake_case__ )
_snake_case : Tuple = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[str] = Accelerator(split_batches=snake_case__ , dispatch_batches=snake_case__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(snake_case__ , snake_case__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
_snake_case : int = Accelerator(split_batches=snake_case__ , dispatch_batches=snake_case__ )
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(snake_case__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
_snake_case : str = Accelerator()
test_torch_metrics(snake_case__ , 5_12 )
accelerator.state._reset_state()
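# Minimal single-process sketch (added for illustration; not invoked by the
# tests above). `gather_for_metrics` trims the duplicated samples that
# distributed samplers pad in, so the gathered count matches the dataset
# length; on a single process the call is effectively a no-op.
def _gather_for_metrics_demo():
    accelerator = Accelerator()
    dataloader = accelerator.prepare(DataLoader(RegressionDataset(length=6 ) , batch_size=4 ) )
    gathered = [accelerator.gather_for_metrics(batch["""x"""] ) for batch in dataloader]
    assert len(torch.cat(gathered ) ) == 6  # no padded duplicates survive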
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 28 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
_snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
_snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
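    # Example of the parsing above (added): "a photo of a cat:2|sketch:0.5"
    # yields prompts ["a photo of a cat", "sketch"] with weights
    # tensor([2.0, 0.5]); a bare prompt defaults to weight 1.0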
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 28 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Dict ):
'''simple docstring'''
self.test()
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = 0
_snake_case : List[Any] = False
while not completed:
if counter == 1:
self.reset()
_snake_case : List[str] = self.advance()
if not self.does_advance(a_ ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
_snake_case , _snake_case , _snake_case : Union[str, Any] = self.update(a_ )
counter += 1
if counter > 10_000:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: Optional[int], a_: int ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: Optional[int], a_: int ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self: List[str], a_: Tuple=False ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Tuple, a_: List[int] ):
'''simple docstring'''
super(a_, self ).__init__()
if not isinstance(a_, a_ ) or len(a_ ) == 0:
raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(a_, a_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
_snake_case : Optional[int] = token_ids
_snake_case : Dict = len(self.token_ids )
_snake_case : Union[str, Any] = -1 # the index of the currently fulfilled step
_snake_case : str = False
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self: List[Any], a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self: List[str], a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}" )
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : List[Any] = False
if self.does_advance(a_ ):
self.fulfilled_idx += 1
_snake_case : List[str] = True
if self.fulfilled_idx == (self.seqlen - 1):
_snake_case : str = True
_snake_case : Optional[int] = completed
else:
# failed to make progress.
_snake_case : Union[str, Any] = True
self.reset()
return stepped, completed, reset
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = False
_snake_case : Dict = 0
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase_ ( self: Union[str, Any], a_: str=False ):
'''simple docstring'''
_snake_case : Dict = PhrasalConstraint(self.token_ids )
if stateful:
_snake_case : List[Any] = self.seqlen
_snake_case : str = self.fulfilled_idx
_snake_case : Union[str, Any] = self.completed
return new_constraint
class lowercase:
'''simple docstring'''
def __init__( self: int, a_: List[List[int]], a_: int=True ):
'''simple docstring'''
_snake_case : List[str] = max([len(a_ ) for one in nested_token_ids] )
_snake_case : int = {}
for token_ids in nested_token_ids:
_snake_case : List[str] = root
for tidx, token_id in enumerate(a_ ):
if token_id not in level:
_snake_case : int = {}
_snake_case : List[Any] = level[token_id]
if no_subsets and self.has_subsets(a_, a_ ):
raise ValueError(
"""Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
f" {nested_token_ids}." )
_snake_case : int = root
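        # e.g. nested_token_ids = [[1, 2, 3], [1, 2, 4]] builds the trie
        # {1: {2: {3: {}, 4: {}}}}, so after the shared prefix [1, 2] both
        # 3 and 4 are valid next tokens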
def UpperCamelCase_ ( self: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.trie
for current_token in current_seq:
_snake_case : Any = start[current_token]
_snake_case : Any = list(start.keys() )
return next_tokens
def UpperCamelCase_ ( self: List[Any], a_: Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.next_tokens(a_ )
return len(a_ ) == 0
def UpperCamelCase_ ( self: Dict, a_: List[Any] ):
'''simple docstring'''
_snake_case : int = list(root.values() )
if len(a_ ) == 0:
return 1
else:
return sum([self.count_leaves(a_ ) for nn in next_nodes] )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: int ):
'''simple docstring'''
_snake_case : str = self.count_leaves(a_ )
return len(a_ ) != leaf_count
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Tuple, a_: List[List[int]] ):
'''simple docstring'''
super(a_, self ).__init__()
if not isinstance(a_, a_ ) or len(a_ ) == 0:
raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(a_, a_ ) for token_ids in nested_token_ids ):
raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(a_, a_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
_snake_case : Optional[Any] = DisjunctiveTrie(a_ )
_snake_case : Optional[int] = nested_token_ids
_snake_case : Tuple = self.trie.max_height
_snake_case : List[str] = []
_snake_case : Union[str, Any] = False
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[Any] = self.trie.next_tokens(self.current_seq )
if len(a_ ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self: Tuple, a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}" )
_snake_case : Optional[Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase_ ( self: Any, a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}" )
_snake_case : Any = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = False
if self.does_advance(a_ ):
self.current_seq.append(a_ )
_snake_case : Optional[Any] = True
else:
_snake_case : Tuple = True
self.reset()
_snake_case : Any = self.trie.reached_leaf(self.current_seq )
_snake_case : Tuple = completed
return stepped, completed, reset
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Any = False
_snake_case : Tuple = []
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase_ ( self: Dict, a_: List[Any]=False ):
'''simple docstring'''
_snake_case : Union[str, Any] = DisjunctiveConstraint(self.token_ids )
if stateful:
_snake_case : Tuple = self.seqlen
_snake_case : List[str] = self.current_seq
_snake_case : List[Any] = self.completed
return new_constraint
class lowercase:
'''simple docstring'''
def __init__( self: Any, a_: List[Constraint] ):
'''simple docstring'''
_snake_case : Any = constraints
# max # of steps required to fulfill a given constraint
_snake_case : Optional[int] = max([c.seqlen for c in constraints] )
_snake_case : Optional[Any] = len(a_ )
_snake_case : List[Any] = False
self.init_state()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = []
_snake_case : Union[str, Any] = None
_snake_case : Optional[int] = [constraint.copy(stateful=a_ ) for constraint in self.constraints]
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_snake_case : str = constraint.advance()
if isinstance(a_, a_ ):
token_list.append(a_ )
elif isinstance(a_, a_ ):
token_list.extend(a_ )
else:
_snake_case : List[str] = self.inprogress_constraint.advance()
if isinstance(a_, a_ ):
token_list.append(a_ )
elif isinstance(a_, a_ ):
token_list.extend(a_ )
if len(a_ ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self: int, a_: Optional[List[int]] ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_snake_case , _snake_case : int = self.add(a_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self: List[Any], a_: int ):
'''simple docstring'''
if not isinstance(a_, a_ ):
raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`." )
_snake_case , _snake_case : List[str] = False, False
if self.completed:
_snake_case : Dict = True
_snake_case : List[Any] = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
_snake_case , _snake_case , _snake_case : Optional[int] = self.inprogress_constraint.update(a_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a_ ) )
_snake_case : Union[str, Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_snake_case : Dict = None
if len(self.pending_constraints ) == 0:
# we're done!
_snake_case : Union[str, Any] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(a_ ):
_snake_case , _snake_case , _snake_case : Any = pending_constraint.update(a_ )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(a_ )
_snake_case : Optional[Any] = None
if not complete and stepped:
_snake_case : List[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_snake_case : Any = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_snake_case : List[str] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCamelCase_ ( self: Dict, a_: int=True ):
'''simple docstring'''
        _snake_case : Dict = ConstraintListState(self.constraints )  # we never actually touch the self.constraints
        # objects throughout this process, so they stay in their initialization state.
if stateful:
_snake_case : List[Any] = [
constraint.copy(stateful=a_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_snake_case : Dict = self.inprogress_constraint.copy(stateful=a_ )
_snake_case : List[str] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 28 |
"""simple docstring"""
def UpperCAmelCase__ (number: int ):
"""simple docstring"""
    if not isinstance(number , int ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
    count = 0
while number:
        # Clearing the lowest set bit jumps straight to the next 1, so the
        # loop runs once per set bit instead of once per bit position (it
        # never needs all 32 iterations unless every bit is set)
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
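    # Illustrative cross-check (added): Kernighan's `n &= n - 1` trick must
    # agree with the straightforward count of '1' digits in the binary form.
    for n in (0, 1, 25, 1_024, 2**31 - 1):
        assert UpperCAmelCase__(n ) == bin(n ).count("""1""" )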
| 28 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = StableDiffusionXLImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowercase__ = PipelineTesterMixin.required_optional_params - {"latents"}
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), attention_head_dim=(2, 4), use_linear_projection=a_, addition_embed_type="""text_time""", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
_snake_case : List[str] = EulerDiscreteScheduler(
beta_start=0.00_085, beta_end=0.012, steps_offset=1, beta_schedule="""scaled_linear""", timestep_spacing="""leading""", )
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="""gelu""", projection_dim=32, )
_snake_case : Dict = CLIPTextModel(a_ )
_snake_case : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""", local_files_only=a_ )
_snake_case : Tuple = CLIPTextModelWithProjection(a_ )
_snake_case : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""", local_files_only=a_ )
_snake_case : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self: Union[str, Any], a_: str, a_: Dict=0 ):
'''simple docstring'''
_snake_case : int = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ )
_snake_case : Optional[int] = image / 2 + 0.5
if str(a_ ).startswith("""mps""" ):
_snake_case : Optional[int] = torch.manual_seed(a_ )
else:
_snake_case : str = torch.Generator(device=a_ ).manual_seed(a_ )
_snake_case : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case : Tuple = self.get_dummy_components()
_snake_case : List[Any] = StableDiffusionXLImgaImgPipeline(**a_ )
_snake_case : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_snake_case : str = self.get_dummy_inputs(a_ )
_snake_case : List[Any] = sd_pipe(**a_ ).images
_snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case : List[Any] = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_dummy_components()
_snake_case : Tuple = StableDiffusionXLImgaImgPipeline(**a_ )
_snake_case : Union[str, Any] = sd_pipe.to(a_ )
_snake_case : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
# forward without prompt embeds
_snake_case : str = self.get_dummy_inputs(a_ )
_snake_case : Any = 3 * ["""this is a negative prompt"""]
_snake_case : Optional[Any] = negative_prompt
_snake_case : Optional[Any] = 3 * [inputs["""prompt"""]]
_snake_case : Any = sd_pipe(**a_ )
_snake_case : List[str] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_snake_case : Optional[int] = self.get_dummy_inputs(a_ )
_snake_case : Any = 3 * ["""this is a negative prompt"""]
_snake_case : List[str] = 3 * [inputs.pop("""prompt""" )]
        (
            _snake_case,
            _snake_case,
            _snake_case,
            _snake_case,
        ) = sd_pipe.encode_prompt(a_, negative_prompt=a_ )
_snake_case : Any = sd_pipe(
**a_, prompt_embeds=a_, negative_prompt_embeds=a_, pooled_prompt_embeds=a_, negative_pooled_prompt_embeds=a_, )
_snake_case : Optional[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self: List[str], a_: str, a_: Dict="cpu", a_: str=torch.floataa, a_: str=0 ):
'''simple docstring'''
_snake_case : str = torch.Generator(device=a_ ).manual_seed(a_ )
_snake_case : Any = np.random.RandomState(a_ ).standard_normal((1, 4, 64, 64) )
_snake_case : str = torch.from_numpy(a_ ).to(device=a_, dtype=a_ )
_snake_case : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : int = self.get_inputs(a_ )
_snake_case : Dict = pipe(**a_ ).images
_snake_case : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_snake_case : Optional[int] = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 28 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
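        # with the tester defaults, hidden_sizes[1:] == [20, 30, 40]: one
        # channel count per stage requested in out_features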
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
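# A minimal, hedged usage sketch of the backbone exercised above: a tiny,
# randomly initialized config (no pretrained weights assumed), whose feature-map
# shapes follow the /4 stem and /2-per-stage downsampling.
if __name__ == "__main__":
    sketch_config = ConvNextVaConfig(
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 1, 1],
        out_features=["stage2", "stage3"],
    )
    sketch_model = ConvNextVaBackbone(sketch_config).eval()
    with torch.no_grad():
        sketch_outputs = sketch_model(torch.randn(1, 3, 32, 32))
    # one feature map per requested stage: (1, 20, 4, 4) and (1, 30, 2, 2)
    print([tuple(fm.shape) for fm in sketch_outputs.feature_maps])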
| 28 | 1 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
A_ = get_logger(__name__)
class lowercase( enum.Enum ):
'''simple docstring'''
lowercase__ = "all_checks"
lowercase__ = "basic_checks"
lowercase__ = "no_checks"
class lowercase( __a ):
'''simple docstring'''
class lowercase( __a ):
'''simple docstring'''
class lowercase( __a ):
'''simple docstring'''
class lowercase( __a ):
'''simple docstring'''
def UpperCAmelCase__ (snake_case__ : Optional[dict] , snake_case__ : dict , snake_case__ : Optional[Any]=None ):
"""simple docstring"""
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(snake_case__ ) - set(snake_case__ ) ) )
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(snake_case__ ) - set(snake_case__ ) ) )
_snake_case : Dict = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
_snake_case : Any = """ for """ + verification_name if verification_name is not None else """"""
if len(snake_case__ ) > 0:
raise NonMatchingChecksumError(
F"Checksums didn't match{for_verification_name}:\n"
F"{bad_urls}\n"
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class lowercase( __a ):
'''simple docstring'''
class lowercase( __a ):
'''simple docstring'''
class lowercase( __a ):
'''simple docstring'''
class lowercase( __a ):
'''simple docstring'''
def UpperCAmelCase__ (snake_case__ : Optional[dict] , snake_case__ : dict ):
"""simple docstring"""
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise ExpectedMoreSplits(str(set(snake_case__ ) - set(snake_case__ ) ) )
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise UnexpectedSplits(str(set(snake_case__ ) - set(snake_case__ ) ) )
_snake_case : Optional[Any] = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(snake_case__ ) > 0:
raise NonMatchingSplitsSizesError(str(snake_case__ ) )
logger.info("""All the splits matched successfully.""" )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : bool = True ):
"""simple docstring"""
if record_checksum:
        _snake_case : int = sha256()
with open(snake_case__ , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"""""" ):
m.update(snake_case__ )
_snake_case : List[str] = m.hexdigest()
else:
_snake_case : List[Any] = None
return {"num_bytes": os.path.getsize(snake_case__ ), "checksum": checksum}
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
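# A minimal, hedged usage sketch, shown as a comment because the placeholder-
# renamed helpers above shadow one another; upstream (`datasets.utils.info_utils`)
# they are `get_size_checksum_dict` and `verify_checksums`:
#
#   recorded = {"file:///data/train.csv": get_size_checksum_dict("/data/train.csv")}
#   expected = dict(recorded)  # e.g. read back from dataset_infos.json
#   verify_checksums(expected, recorded)  # silent on success; raises
#   # NonMatchingChecksumError / UnexpectedDownloadedFile on mismatch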
| 28 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
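# A minimal, hedged round-trip sketch of the writer/reader pair exercised by
# the tests above (`ParquetDatasetWriter.write` returns the number of bytes
# written; the temp path is illustrative):
if __name__ == "__main__":
    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as tmp_dir:
        sketch_path = str(Path(tmp_dir) / "sketch.parquet")
        sketch_ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
        assert ParquetDatasetWriter(sketch_ds, sketch_path).write() > 0
        reloaded = ParquetDatasetReader(sketch_path, cache_dir=tmp_dir).read()
        assert reloaded.column_names == ["col_1", "col_2"]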
| 28 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
while a != 0:
_snake_case , _snake_case : Tuple = b % a, a
return b
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
if gcd(snake_case__ , snake_case__ ) != 1:
_snake_case : Union[str, Any] = F"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(snake_case__ )
    _snake_case , _snake_case , _snake_case : str = 1, 0, a
    _snake_case , _snake_case , _snake_case : List[str] = 0, 1, m
    # extended Euclidean algorithm over (u1, u2, u3) and (v1, v2, v3)
    while v3 != 0:
        _snake_case : Union[str, Any] = u3 // v3
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
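# A minimal sanity check of the modular-inverse math above, using Python 3.8+'s
# three-argument pow as an independent cross-check (the placeholder-renamed
# helpers above shadow one another, so they are not called directly):
if __name__ == "__main__":
    # 3 * 4 = 12 ≡ 1 (mod 11), so the inverse of 3 modulo 11 is 4
    assert pow(3, -1, 11) == 4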
| 28 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
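# A minimal, hedged usage sketch mirroring the tester above: a tiny, randomly
# initialized UperNet head over a small ConvNeXt backbone (no pretrained
# weights assumed); the logits come back at the full input resolution.
if __name__ == "__main__":
    backbone_config = ConvNextConfig(
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 1, 1],
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    sketch_config = UperNetConfig(
        backbone_config=backbone_config,
        hidden_size=32,
        auxiliary_in_channels=30,  # channels of the third requested stage
        num_labels=2,
    )
    sketch_model = UperNetForSemanticSegmentation(sketch_config).eval()
    with torch.no_grad():
        logits = sketch_model(torch.randn(1, 3, 32, 32)).logits
    print(tuple(logits.shape))  # (1, 2, 32, 32)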
| 28 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
A_ = trt.Logger(trt.Logger.WARNING)
A_ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
A_ = logging.getLogger(__name__)
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for the preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
A_ = parser.parse_args()
if args.tokenizer_name:
A_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
A_ = args.per_device_eval_batch_size
A_ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
A_ = True
A_ = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    A_ = '''temp_engine/bert-fp16.engine'''
if args.int8:
    A_ = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
A_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
A_ = [network.get_input(i) for i in range(network.num_inputs)]
A_ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
A_ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
A_ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
A_ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
_snake_case : Optional[int] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
_snake_case : Any = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
_snake_case : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , snake_case__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , snake_case__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , snake_case__ )
# start time
_snake_case : Optional[int] = time.time()
# Run inference
context.execute_async(
bindings=[int(snake_case__ ) for d_inp in d_inputs] + [int(snake_case__ ), int(snake_case__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(snake_case__ , snake_case__ , snake_case__ )
cuda.memcpy_dtoh_async(snake_case__ , snake_case__ , snake_case__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
_snake_case : int = time.time()
_snake_case : str = end_time - start_time
_snake_case : int = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
A_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A_ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
A_ = raw_datasets['''validation'''].column_names
A_ = '''question''' if '''question''' in column_names else column_names[0]
A_ = '''context''' if '''context''' in column_names else column_names[1]
A_ = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
A_ = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
A_ = min(args.max_seq_length, tokenizer.model_max_length)
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : Dict = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
_snake_case : List[Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=snake_case__ , stride=args.doc_stride , return_overflowing_tokens=snake_case__ , return_offsets_mapping=snake_case__ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
_snake_case : Union[str, Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
_snake_case : str = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
_snake_case : List[Any] = tokenized_examples.sequence_ids(snake_case__ )
_snake_case : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
_snake_case : Optional[Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
_snake_case : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
A_ = raw_datasets['''validation''']
# Validation Feature Creation
A_ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
A_ = default_data_collator
A_ = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
A_ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str="eval" ):
"""simple docstring"""
_snake_case : int = postprocess_qa_predictions(
examples=snake_case__ , features=snake_case__ , predictions=snake_case__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=snake_case__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
_snake_case : Optional[Any] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
_snake_case : int = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
_snake_case : Tuple = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=snake_case__ , label_ids=snake_case__ )
A_ = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
return trt.volume(engine.get_binding_shape(snake_case__ ) ) * engine.get_binding_dtype(snake_case__ ).itemsize
# Allocate device memory for inputs and outputs.
A_ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
A_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
A_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
A_ = cuda.mem_alloc(h_outputa.nbytes)
A_ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
A_ = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F''' Num examples = {len(eval_dataset)}''')
logger.info(F''' Batch size = {args.per_device_eval_batch_size}''')
A_ = 0.0
A_ = 0
A_ = timeit.default_timer()
A_ = None
for step, batch in enumerate(eval_dataloader):
A_ , A_ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
A_ , A_ = outputs
A_ = torch.tensor(start_logits)
A_ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
A_ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
A_ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
A_ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
A_ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
if all_preds is not None:
A_ = nested_truncate(all_preds, len(eval_dataset))
A_ = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
logger.info('''Total Number of Inference = %d''', niter)
A_ = post_processing_function(eval_examples, eval_dataset, all_preds)
A_ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'''Evaluation metrics: {eval_metric}''')
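# A hedged invocation sketch for this script (the filename and paths are
# illustrative, not taken from the original; the flags are the ones defined by
# the argument parser above):
#
#   python run_qa_trt.py \
#       --onnx_model_path model.onnx \
#       --output_dir ./out \
#       --tokenizer_name bert-base-uncased \
#       --dataset_name squad \
#       --fp16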
| 28 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ = [ord(letter) for letter in string.ascii_lowercase]
A_ = {ord(char) for char in VALID_CHARS}
A_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ):
"""simple docstring"""
_snake_case : str = ""
_snake_case : int
_snake_case : int
_snake_case : int
for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ):
_snake_case : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case__ )
return decoded
def UpperCAmelCase__ (snake_case__ : list[int] ):
"""simple docstring"""
_snake_case : list[str] = []
for key in product(snake_case__ , repeat=3 ):
_snake_case : List[Any] = try_key(snake_case__ , snake_case__ )
if encoded is not None:
possibles.append(snake_case__ )
return possibles
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
_snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )]
_snake_case : Optional[Any] = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_snake_case : Optional[int] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
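# A worked example of the XOR step implemented above (hedged: computed inline,
# since the placeholder-renamed helpers shadow one another). Encrypting "the"
# with the cycled key "god" and XOR-ing again with the same key recovers it.
if __name__ == "__main__":
    cipher = [ord(p) ^ ord(k) for p, k in zip("the", "god")]  # [19, 7, 1]
    assert "".join(chr(c ^ ord(k)) for c, k in zip(cipher, "god")) == "the"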
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
A_ = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
A_ = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
A_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
A_ = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
A_ = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase__ = DPRContextEncoderTokenizer
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase__ = DPRQuestionEncoderTokenizer
A_ = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
A_ = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
A_ = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(__a )
class lowercase:
'''simple docstring'''
def __call__( self: List[str], a_: Optional[Any], a_: Optional[str] = None, a_: Optional[str] = None, a_: Union[bool, str] = False, a_: Union[bool, str] = False, a_: Optional[int] = None, a_: Optional[Union[str, TensorType]] = None, a_: Optional[bool] = None, **a_: Any, ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
a_, padding=a_, truncation=a_, max_length=a_, return_tensors=a_, return_attention_mask=a_, **a_, )
elif titles is None or texts is None:
_snake_case : Optional[int] = titles if texts is None else texts
return super().__call__(
a_, a_, padding=a_, truncation=a_, max_length=a_, return_tensors=a_, return_attention_mask=a_, **a_, )
_snake_case : List[Any] = titles if not isinstance(a_, a_ ) else [titles]
_snake_case : Optional[Any] = texts if not isinstance(a_, a_ ) else [texts]
_snake_case : List[Any] = len(a_ )
_snake_case : Union[str, Any] = questions if not isinstance(a_, a_ ) else [questions] * n_passages
assert len(a_ ) == len(
a_ ), f"There should be as many titles than texts but got {len(a_ )} titles and {len(a_ )} texts."
_snake_case : int = super().__call__(a_, a_, padding=a_, truncation=a_ )["""input_ids"""]
_snake_case : List[Any] = super().__call__(a_, add_special_tokens=a_, padding=a_, truncation=a_ )["""input_ids"""]
_snake_case : List[str] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(a_, a_ )
]
}
if return_attention_mask is not False:
_snake_case : int = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_snake_case : str = attention_mask
return self.pad(a_, padding=a_, max_length=a_, return_tensors=a_ )
def UpperCamelCase_ ( self: List[str], a_: BatchEncoding, a_: DPRReaderOutput, a_: int = 16, a_: int = 64, a_: int = 4, ):
'''simple docstring'''
_snake_case : Optional[Any] = reader_input["""input_ids"""]
_snake_case , _snake_case , _snake_case : Optional[int] = reader_output[:3]
_snake_case : Union[str, Any] = len(a_ )
_snake_case : Any = sorted(range(a_ ), reverse=a_, key=relevance_logits.__getitem__ )
_snake_case : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_snake_case : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_snake_case : Dict = sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_snake_case : Any = sequence_ids.index(self.pad_token_id )
else:
_snake_case : Optional[Any] = len(a_ )
_snake_case : List[str] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=a_, top_spans=a_, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=a_, start_index=a_, end_index=a_, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(a_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase_ ( self: Optional[int], a_: List[int], a_: List[int], a_: int, a_: int, ):
'''simple docstring'''
_snake_case : int = []
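        # score every candidate span [start, start + length) as start_logit + end_logit,
        # then keep the highest-scoring spans that do not overlap an already chosen span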
for start_index, start_score in enumerate(a_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_snake_case : Tuple = sorted(a_, key=lambda a_ : x[1], reverse=a_ )
_snake_case : Optional[int] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
_snake_case : int = end_index - start_index + 1
assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(a_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__a )
class lowercase( __a , __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = DPRReaderTokenizer
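# --- minimal usage sketch (hypothetical: assumes the upstream DPRReader / DPRReaderTokenizer API;
# the classes above appear in this file under obfuscated names) ---
if __name__ == "__main__":
    from transformers import DPRReader, DPRReaderTokenizer

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    # decode_best_spans ranks passages by relevance_logits and extracts the top
    # non-overlapping answer spans, as implemented in the mixin above
    print(tokenizer.decode_best_spans(encoded_inputs, outputs))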
| 28 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "feature_extractor"]
lowercase__ = "TvltImageProcessor"
lowercase__ = "TvltFeatureExtractor"
def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=a_, feature_extractor=a_ )
_snake_case : Any = image_processor
_snake_case : Dict = feature_extractor
def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_snake_case : Optional[int] = None
if images is not None:
_snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ )
if images_mixed is not None:
_snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ )
if audio is not None:
_snake_case : Any = self.feature_extractor(
a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ )
_snake_case : List[str] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
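# --- minimal usage sketch (hypothetical: the checkpoint name and input shapes below are illustrative assumptions) ---
if __name__ == "__main__":
    import numpy as np
    from transformers import TvltProcessor

    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")  # assumed checkpoint name
    audio = np.random.randn(10_000)           # mono waveform (illustrative length)
    images = np.random.randn(8, 3, 224, 224)  # one clip of 8 RGB frames (illustrative shape)
    inputs = processor(images=images, audio=audio, sampling_rate=44_100, return_tensors="pt")
    # the returned dict merges the image processor outputs (pixel values/masks)
    # with the feature extractor outputs (audio values/masks), as assembled above
    print(sorted(inputs.keys()))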
| 28 | 1 |
"""simple docstring"""
from typing import Any
class lowercase:
'''simple docstring'''
def __init__( self: Tuple, a_: Any ):
'''simple docstring'''
_snake_case : List[str] = data
_snake_case : Dict = None
def __repr__( self: int ):
'''simple docstring'''
return f"Node({self.data})"
class lowercase:
'''simple docstring'''
def __init__( self: List[str] ):
'''simple docstring'''
_snake_case : int = None
def __iter__( self: int ):
'''simple docstring'''
_snake_case : List[Any] = self.head
while node:
yield node.data
_snake_case : Any = node.next
def __len__( self: int ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self: Dict ):
'''simple docstring'''
return "->".join([str(a_ ) for item in self] )
def __getitem__( self: Dict, a_: int ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: Any, a_: int, a_: Any ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
_snake_case : Dict = self.head
for _ in range(a_ ):
_snake_case : Tuple = current.next
_snake_case : int = data
def UpperCamelCase_ ( self: Any, a_: Any ):
'''simple docstring'''
self.insert_nth(len(self ), a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any ):
'''simple docstring'''
self.insert_nth(0, a_ )
def UpperCamelCase_ ( self: Any, a_: int, a_: Any ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError("""list index out of range""" )
_snake_case : Tuple = Node(a_ )
if self.head is None:
_snake_case : Any = new_node
elif index == 0:
_snake_case : List[Any] = self.head # link new_node to head
_snake_case : Tuple = new_node
else:
_snake_case : List[Any] = self.head
for _ in range(index - 1 ):
_snake_case : Dict = temp.next
_snake_case : str = temp.next
_snake_case : str = new_node
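    # insert_nth walks the list from the head, so insertion costs O(index) time and O(1) extra space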
def UpperCamelCase_ ( self: List[str] ): # print every node data
'''simple docstring'''
print(self )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase_ ( self: Tuple ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self: Tuple, a_: int = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("""List index out of range.""" )
_snake_case : int = self.head # default first node
if index == 0:
_snake_case : Dict = self.head.next
else:
_snake_case : Optional[int] = self.head
for _ in range(index - 1 ):
_snake_case : List[Any] = temp.next
_snake_case : List[str] = temp.next
_snake_case : Optional[Any] = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return self.head is None
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : str = None
_snake_case : Optional[Any] = self.head
while current:
# Store the current node's next node.
_snake_case : Any = current.next
# Make the current node's next point backwards
_snake_case : List[Any] = prev
# Make the previous node be the current node
_snake_case : int = current
# Make the current node the next node (to progress iteration)
_snake_case : Union[str, Any] = next_node
# Return prev in order to put the head at the end
_snake_case : Dict = prev
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[str] = LinkedList()
assert linked_list.is_empty() is True
assert str(snake_case__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(snake_case__ ) == i
linked_list.insert_nth(snake_case__ , i + 1 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(snake_case__ ) == 9
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_snake_case : Tuple = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(-8 , 1 ) )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = [
-9,
1_00,
Node(77_34_51_12 ),
"""dlrow olleH""",
7,
55_55,
0,
-1_92.5_55_55,
"""Hello, world!""",
77.9,
Node(10 ),
None,
None,
12.20,
]
_snake_case : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(snake_case__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(snake_case__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_snake_case : Any = linked_list.delete_head()
assert result == -9
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_snake_case : Dict = linked_list.delete_tail()
assert result == 12.2
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_snake_case : Tuple = linked_list.delete_nth(10 )
assert result is None
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(snake_case__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(snake_case__ )
assert (
str(snake_case__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(snake_case__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def UpperCAmelCase__ ():
"""simple docstring"""
from doctest import testmod
testmod()
_snake_case : Optional[int] = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(snake_case__ )
print("""\nReading/changing Node data using indexing:""" )
print(F"Element at Position 1: {linked_list[1]}" )
_snake_case : Dict = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(snake_case__ )
print(F"length of linked_list is : {len(snake_case__ )}" )
if __name__ == "__main__":
main()
| 28 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
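        # ByT5 is tokenizer-free: each UTF-8 byte b maps to id b + 3 (ids 0-2 are reserved for
        # pad/eos/unk), so "U" = 0x55 = 85 becomes 88 and "." = 46 becomes 49, with 1 as the trailing </s>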
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
| 28 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : Any = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : List[Any] = 0
while number > 0:
_snake_case : Optional[Any] = number % 10
sum_of_digits += last_digit
_snake_case : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def UpperCAmelCase__ (snake_case__ : int = 1_00 ):
"""simple docstring"""
_snake_case : Optional[int] = factorial(snake_case__ )
_snake_case : List[str] = split_and_add(snake_case__ )
return result
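# Example: solution(100) sums the digits of 100! and returns 648 (Project Euler problem 20).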
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 28 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
| 28 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""iterations must be defined as integers""" )
    if not isinstance(snake_case__ , snake_case__ ) or not number >= 1:
        raise ValueError("""starting number must be an integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
_snake_case : Optional[int] = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(snake_case__ )
# print(out)
number += 1
out += " "
return out
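# Example: starting number 1 with 7 iterations yields "1 2 Fizz 4 Buzz Fizz 7 "
# (a space is appended after every entry).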
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : int = vocab_size
_snake_case : int = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Tuple = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : Optional[Any] = rotary_value
_snake_case : List[str] = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
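# --- minimal usage sketch (hypothetical: assumes the upstream RoFormerConfig / RoFormerOnnxConfig
# names for the two classes above, and that RoFormerOnnxConfig is exported from the roformer module) ---
if __name__ == "__main__":
    from transformers import RoFormerConfig
    from transformers.models.roformer import RoFormerOnnxConfig

    config = RoFormerConfig.from_pretrained("junnyu/roformer_chinese_base")
    onnx_config = RoFormerOnnxConfig(config, task="sequence-classification")
    print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes, as defined in the property above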
| 28 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" )
return image
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
_snake_case : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Optional[int] = val
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
_snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
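        # the original vision encoder has no key bias, so zeros are spliced between the
        # q and v biases when building the fused qkv bias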
_snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) )
_snake_case : Dict = qkv_bias
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24
_snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "opt-6.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "t5-xl" in model_name:
_snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ )
return config, image_size
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for old_item in old_list:
_snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
_snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
_snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
_snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
_snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
_snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ):
"""simple docstring"""
_snake_case : Dict = []
for old_item in old_list:
_snake_case : Dict = old_item
_snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
_snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case : Union[str, Any] = old_checkpoint[path]
_snake_case : Optional[int] = old_tensor.shape[0] // 3
_snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 )
_snake_case : Union[str, Any] = query.reshape(snake_case__ )
_snake_case : Tuple = key.reshape(snake_case__ )
_snake_case : Any = value.reshape(snake_case__ )
for path in paths:
_snake_case : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case : Optional[Any] = old_checkpoint[path["""old"""]]
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 28 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
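# `_import_structure` maps each submodule to the public names it exports; the `_LazyModule`
# constructed at the bottom of this file resolves these entries on first attribute access,
# so the heavy torch/tf/flax backends are only imported when one of their classes is used.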
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 28 |
"""simple docstring"""
from typing import Any
def UpperCAmelCase__ (input_list : list ):
    """simple docstring"""
    if not input_list:
        return []
    counts = [input_list.count(value ) for value in input_list]
    max_count = max(counts )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts ) if value == max_count} )
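# Example (hypothetical): UpperCAmelCase__([2, 2, 1]) -> [2]; ties are all returned, e.g.
# UpperCAmelCase__([1, 1, 2, 2]) -> [1, 2]; the empty list maps to [].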
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 1 |
"""simple docstring"""
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    """simple docstring"""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F"{script_name}.py"
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
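# A test can consume the fixture chain like this (hypothetical sketch):
# def test_dataset_script(dataset_loading_script_dir):
#     module = datasets.load.dataset_module_factory(dataset_loading_script_dir)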
| 28 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_vision_model"
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1E-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ):
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        # a full BridgeTower config nests this sub-config under the "vision_config" key
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_text_model"
    def __init__( self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ):
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        # a full BridgeTower config nests this sub-config under the "text_config" key
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower"
    def __init__( self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1E-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
        '''simple docstring'''
        kwargs.pop("""text_config_dict""", None )
        kwargs.pop("""vision_config_dict""", None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def from_text_vision_configs( cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
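# Minimal usage sketch (assuming the defaults above): BridgeTowerConfig() nests a text and a
# vision sub-config, and to_dict() re-serializes them under "text_config" / "vision_config".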
| 28 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_groupvit'''] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_groupvit'''] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 28 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys(config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name , eos_token_id=None ):
    """simple docstring"""
    image_size = 3_64 if """coco""" in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
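# e.g. (hypothetical): get_blipa_config("blip2-opt-2.7b-coco") pairs a 364px vision config
# with an OPT-2.7b text config; non-coco checkpoints use 224px inputs.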
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    """simple docstring"""
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }
    name , type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print("""Done!""" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" , """qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" , """attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" , """language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" , """language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" , """language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" , """language""" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
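    # the converted state dict carries a "qformer.embeddings.position_ids" buffer that the HF
    # module does not register, so it is the one key expected to be reported as unexpected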
    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print("""First values of original logits:""" , original_logits[0, :3, :3] )
    print("""First values of HF logits:""" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
    print("""Looks ok!""" )
    print("""Generating a caption...""" )
    prompt = """"""
    input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids.to(device )
    original_outputs = original_model.generate({"""image""": original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("""Original generation:""" , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("""HF generation:""" , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F"nielsr/{model_name}" )
        hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
| 28 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_file_path ):
    """simple docstring"""
    print("""Loading config file...""" )
    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_file_path , """r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(config_file_path , str(exc ) ) )
    return config
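# Example (hypothetical): {"model": {"classification": {"name": "mobilevit_v2"}}} flattens so
# that getattr(config, "model.classification.name") == "mobilevit_v2", matching the checks below.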
def get_mobilevitva_config(task_name , orig_cfg_file ):
    """simple docstring"""
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        config.num_labels = 10_00
        if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
            config.image_size = 3_84
        else:
            config.image_size = 2_56
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        config.num_labels = 2_10_00
        if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
            config.image_size = 3_84
        else:
            config.image_size = 2_56
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        config.num_labels = 1_51
        config.image_size = 5_12
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_""" ):
        config.num_labels = 21
        config.image_size = 5_12
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(orig_config , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
            config.aspp_dropout_prob = getattr(orig_config , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict , base_model=False ):
    """simple docstring"""
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""" , """.normalization.""" )
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if F"layer_{i}." in k:
                k_new = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
        for i in [3, 4, 5]:
            if F"layer_{i}.0." in k:
                k_new = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if F"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if F"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if F"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
                if F"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
            if F"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""" , """classifier.""" )
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""" , """.""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys(state_dict ):
    """simple docstring"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name , orig_checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    """simple docstring"""
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(orig_checkpoint_path , map_location="""cpu""" )
    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 1 |
"""simple docstring"""
import math
class Graph:
    '''simple docstring'''
    def __init__( self, n=0 ): # a graph with Node 0,1,...,N-1
        '''simple docstring'''
        self.n = n
        self.w = [
            [math.inf for j in range(0, n )] for i in range(0, n )
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n )] for i in range(0, n )
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge( self, u, v, w ):
        '''simple docstring'''
        self.dp[u][v] = w
    def floyd_warshall( self ):
        '''simple docstring'''
        for k in range(0, self.n ):
            for i in range(0, self.n ):
                for j in range(0, self.n ):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j] )
    def show_min( self, u, v ):
        '''simple docstring'''
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
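    # With the edges above, graph.show_min(1, 4) returns 11 (1 -> 3 -> 4) and
    # graph.show_min(0, 3) returns 16 (0 -> 2 -> 3); floyd_warshall runs in O(n^3).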
| 28 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, '''src''', '''diffusers''')
class CheckDummiesTester( unittest.TestCase ):
    '''simple docstring'''
    def test_find_backend( self ):
        '''simple docstring'''
        simple_backend = find_backend(""" if not is_torch_available():""" )
        self.assertEqual(simple_backend, """torch""" )
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
        self.assertEqual(double_backend, """torch_and_transformers""" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
        self.assertEqual(triple_backend, """torch_and_transformers_and_onnx""" )
    def test_read_init( self ):
        '''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""", objects )
        self.assertIn("""torch_and_transformers""", objects )
        self.assertIn("""flax_and_transformers""", objects )
        self.assertIn("""torch_and_transformers_and_onnx""", objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
    def test_create_dummy_object( self ):
        '''simple docstring'''
        dummy_constant = create_dummy_object("""CONSTANT""", """'torch'""" )
        self.assertEqual(dummy_constant, """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""", """'torch'""" )
        self.assertEqual(
            dummy_function, """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""", """'torch'""" )
        self.assertEqual(dummy_class, expected_dummy_class )
    def test_create_dummy_files( self ):
        '''simple docstring'''
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""], expected_dummy_pytorch_file )
| 28 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
    '''simple docstring'''
lowercase__ = ","
lowercase__ = None
lowercase__ = "infer"
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = True
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = True
lowercase__ = True
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = "."
lowercase__ = None
lowercase__ = '"'
lowercase__ = 0
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = True
lowercase__ = True
lowercase__ = 0
lowercase__ = True
lowercase__ = False
lowercase__ = None
lowercase__ = 1_00_00
lowercase__ = None
lowercase__ = "strict"
lowercase__ = "error"
lowercase__ = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs( self ):
        '''simple docstring'''
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
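    # Example (hypothetical): with __post_init__ applied, CsvConfig(delimiter=";") yields
    # pd_read_csv_kwargs["sep"] == ";", and deprecated/new pandas kwargs are pruned above.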
class Csv( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self, dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files, (str, list, tuple) ):
            files = data_files
            if isinstance(files, str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table( self, pa_table: pa.Table ):
        '''simple docstring'''
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema )
        return pa_table
    def _generate_tables( self, files ):
        '''simple docstring'''
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                raise
| 28 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_owlvit'''] = ['''OwlViTFeatureExtractor''']
    _import_structure['''image_processing_owlvit'''] = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_owlvit'''] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 28 | 1 |
"""simple docstring"""
import sys
import turtle
def get_mid(pa: tuple[float, float] , pb: tuple[float, float] ):
    """simple docstring"""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa: tuple[float, float] , vertexb: tuple[float, float] , vertexc: tuple[float, float] , depth: int , ):
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
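    # Each recursion level splits every triangle into three, so depth d draws 3**d smallest triangles.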
| 28 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode , use_xla ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
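# Eager mode keeps runs debuggable; graph mode (optionally XLA-compiled via tf.function)
# is what benchmark-realistic numbers should be measured in.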
def random_input_ids(batch_size , sequence_length , vocab_size ):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
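# e.g. (hypothetical): random_input_ids(8, 128, 30_522) -> an int32 tensor of shape (8, 128)
# holding uniformly sampled token ids in [0, vocab_size - 1].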
class TensorFlowBenchmark( Benchmark ):
    '''simple docstring'''
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version( self ):
        '''simple docstring'''
        return tf.__version__
    def _inference_speed( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config, """architectures""" )
            and isinstance(config.architectures, list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""", fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_forward():
            return model(input_ids, training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config, """architectures""" )
            and isinstance(config.architectures, list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""", fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True )[0]
            gradients = tf.gradients(loss, model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True )[0]
            gradients = tf.gradients(loss, model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self, func ):
        '''simple docstring'''
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(func, repeat=1, number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 1 |
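# A minimal standalone sketch of the speed-measurement pattern above: warm up
# compiled (TPU/XLA) models, then keep the minimum over several timeit.repeat
# trials, as the linked timeit docs recommend. The function name and trial
# counts below are illustrative, not the benchmark's actual API.
import timeit

def measure_speed(forward, repeat: int = 3, warmup: bool = False) -> float:
    """Best average seconds per call of `forward` across `repeat` trials."""
    if warmup:
        # one throwaway trial of 5 calls to stabilize JIT/XLA compilation
        timeit.repeat(forward, repeat=1, number=5)
    # each trial times 10 consecutive calls; min is less noisy than the mean
    runtimes = timeit.repeat(forward, repeat=repeat, number=10)
    return min(runtimes) / 10.0

print(f"{measure_speed(lambda: sum(range(10_000)), warmup=True):.6f} s/call")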
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : bool , snake_case__ : bool ):
"""simple docstring"""
def run_func(snake_case__ : Tuple ):
@wraps(snake_case__ )
def run_in_eager_mode(*snake_case__ : str , **snake_case__ : Any ):
return func(*snake_case__ , **snake_case__ )
@wraps(snake_case__ )
@tf.function(experimental_compile=snake_case__ )
def run_in_graph_mode(*snake_case__ : Any , **snake_case__ : Optional[int] ):
return func(*snake_case__ , **snake_case__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = random.Random()
_snake_case : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = "TensorFlow"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times so TPU/XLA compilation stabilizes before timing
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 |
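# A minimal sketch of the eager-vs-graph decorator used above
# (`run_with_tf_optimizations`), with descriptive names. Note that
# `experimental_compile` is the older spelling of today's `jit_compile`
# flag on `tf.function`; argument names here are illustrative.
from functools import wraps

import tensorflow as tf

def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode:
            if use_xla:
                raise ValueError("XLA requires graph mode; set eager_mode=False.")
            return run_in_eager_mode
        return run_in_graph_mode

    return run_func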
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = int(snake_case__ )
# Initialize Result
_snake_case : str = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
        answer.append(snake_case__ ) # append the chosen denomination to the answer list
return answer
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
        input('''Do you want to enter your denominations ? (y/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 | 1 |
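# With descriptive names, the greedy change-making routine above reads as
# follows. A sorted(...) call is added here so the input need not be
# pre-sorted (the original assumes ascending denominations), and greedy is
# only guaranteed optimal for canonical coin systems such as the Indian
# denominations used above.
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Repeatedly take the largest denomination that still fits."""
    total_value = int(value)
    answer: list[int] = []
    for denomination in sorted(denominations, reverse=True):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer

# 74 = 50 + 20 + 2 + 2
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "74") == [50, 20, 2, 2]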
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase( __a ):
'''simple docstring'''
def __init__( self: int, a_: Optional[int], a_: int=13, a_: List[Any]=7, a_: List[str]=True, a_: Optional[Any]=True, a_: Any=True, a_: List[Any]=True, a_: Tuple=99, a_: Tuple=32, a_: Tuple=5, a_: Optional[Any]=4, a_: Optional[Any]=37, a_: List[Any]="gelu", a_: List[str]=0.1, a_: Union[str, Any]=0.1, a_: Union[str, Any]=512, a_: Tuple=16, a_: List[str]=2, a_: Optional[int]=0.02, a_: int=False, a_: Union[str, Any]=True, a_: Any="None", a_: Optional[int]=3, a_: Optional[Any]=4, a_: Union[str, Any]=None, ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Optional[int] = batch_size
_snake_case : Union[str, Any] = seq_length
_snake_case : Dict = is_training
_snake_case : Any = use_input_mask
_snake_case : int = use_token_type_ids
_snake_case : Any = use_labels
_snake_case : str = vocab_size
_snake_case : List[Any] = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : str = type_vocab_size
_snake_case : Dict = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = num_labels
_snake_case : Any = num_choices
_snake_case : Dict = relative_attention
_snake_case : Union[str, Any] = position_biased_input
_snake_case : Any = pos_att_type
_snake_case : int = scope
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_snake_case : Optional[Any] = None
if self.use_input_mask:
_snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
_snake_case : Any = None
if self.use_token_type_ids:
_snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
_snake_case : Tuple = None
_snake_case : Optional[int] = None
_snake_case : List[str] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
_snake_case : str = ids_tensor([self.batch_size], self.num_choices )
_snake_case : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_config()
_snake_case : List[str] = 300
return config
def UpperCamelCase_ ( self: int, a_: Any ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ), [] )
def UpperCamelCase_ ( self: Dict, a_: List[str], a_: Optional[Any], a_: Dict, a_: List[Any], a_: Optional[int], a_: str, a_: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = DebertaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_, attention_mask=a_, token_type_ids=a_ )[0]
_snake_case : List[str] = model(a_, token_type_ids=a_ )[0]
_snake_case : Optional[int] = model(a_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ), [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase_ ( self: List[Any], a_: str, a_: List[str], a_: Dict, a_: Any, a_: List[str], a_: Optional[Any], a_: int ):
'''simple docstring'''
_snake_case : Dict = DebertaForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self: List[str], a_: str, a_: Tuple, a_: Any, a_: List[str], a_: Dict, a_: List[str], a_: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = self.num_labels
_snake_case : List[str] = DebertaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ )
self.parent.assertListEqual(list(result.logits.size() ), [self.batch_size, self.num_labels] )
self.check_loss_output(a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[int], a_: Tuple, a_: Optional[int], a_: List[Any], a_: Tuple, a_: str, a_: Dict ):
'''simple docstring'''
_snake_case : Any = self.num_labels
_snake_case : Optional[Any] = DebertaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: str, a_: Union[str, Any], a_: str, a_: Optional[int], a_: Any, a_: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = DebertaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(
a_, attention_mask=a_, token_type_ids=a_, start_positions=a_, end_positions=a_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Dict = config_and_inputs
_snake_case : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = DebertaModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self, config_class=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a_ )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = DebertaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
_snake_case : Tuple = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_snake_case : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case : List[Any] = model(a_, attention_mask=a_ )[0]
# compare the actual values for a slice.
_snake_case : Dict = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], a_, atol=1E-4 ), f"{output[:, 1:4, 1:4]}" )
| 28 |
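# The DeBERTa tests above follow the standard model-tester flow: build a tiny
# random batch, run the model in eval mode under no_grad, and assert output
# shapes. A stripped-down sketch of that flow (the small config values are
# illustrative placeholders, not the tester's exact defaults):
import torch

from transformers import DebertaConfig, DebertaModel

batch_size, seq_length = 2, 7
config = DebertaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = DebertaModel(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_length))
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    sequence_output = model(input_ids, attention_mask=attention_mask)[0]
assert sequence_output.shape == (batch_size, seq_length, config.hidden_size)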
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
| 28 | 1 |
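# As the BeiT tester's comment notes, the sequence length for ViT-style
# models is the number of image patches plus one [CLS] token. A quick worked
# check with the tester's defaults (30x30 images, 2x2 patches):
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = num_patches + 1                   # +1 for the [CLS] token
assert (num_patches, seq_length) == (225, 226)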
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case , _snake_case : Tuple = analyze_text(snake_case__ )
_snake_case : Optional[int] = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
_snake_case : List[Any] = sum(single_char_strings.values() )
# one length string
_snake_case : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_snake_case : List[str] = single_char_strings[ch]
_snake_case : str = my_str / all_sum
my_fir_sum += prob * math.loga(snake_case__ ) # entropy formula.
# print entropy
print(F"{round(-1 * my_fir_sum ):.1f}" )
# two len string
_snake_case : Union[str, Any] = sum(two_char_strings.values() )
_snake_case : Any = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
        for chb in my_alphas:
            _snake_case : str = cha + chb
if sequence in two_char_strings:
_snake_case : List[str] = two_char_strings[sequence]
_snake_case : int = int(snake_case__ ) / all_sum
my_sec_sum += prob * math.loga(snake_case__ )
# print second entropy
print(F"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Dict = Counter() # type: ignore
_snake_case : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(snake_case__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def UpperCAmelCase__ ():
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 28 |
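# De-obfuscated, the computation above is first- and second-order Shannon
# entropy of a text. A compact equivalent using collections.Counter (a
# sketch: unlike the module above, it does not pad the first bigram with a
# leading space, so values can differ slightly on short inputs):
import math
from collections import Counter

def shannon_entropy(text: str, order: int = 1) -> float:
    """Entropy in bits of the distribution of `order`-grams in `text`."""
    grams = Counter(text[i : i + order] for i in range(len(text) - order + 1))
    total = sum(grams.values())
    return -sum((n / total) * math.log2(n / total) for n in grams.values())

sample = "had repulsive dashwoods suspicion sincerity but advantage now him"
h1, h2 = shannon_entropy(sample, 1), shannon_entropy(sample, 2)
print(f"H1={h1:.1f} bits, H2={h2:.1f} bits, difference={h2 - h1:.1f}")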
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase( __a ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 28 | 1 |
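# The recurring pattern in the scheduler tests above is a save/reload
# roundtrip: serialize the config to a temporary directory, rebuild the
# scheduler, and check that both instances step identically. A minimal
# sketch of that pattern (the dummy sample/residual tensors are illustrative):
import tempfile

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    new_scheduler = IPNDMScheduler.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(10)

sample = torch.ones(1, 3, 8, 8)
residual = 0.1 * sample
t = scheduler.timesteps[0]
output = scheduler.step(residual, t, sample).prev_sample
new_output = new_scheduler.step(residual, t, sample).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5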
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''roberta-base''': 5_12,
'''roberta-large''': 5_12,
'''roberta-large-mnli''': 5_12,
'''distilroberta-base''': 5_12,
'''roberta-base-openai-detector''': 5_12,
'''roberta-large-openai-detector''': 5_12,
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = RobertaTokenizer
def __init__( self: int, a_: List[Any]=None, a_: Dict=None, a_: Optional[Any]=None, a_: Any="replace", a_: Optional[int]="<s>", a_: str="</s>", a_: Union[str, Any]="</s>", a_: Tuple="<s>", a_: Any="<unk>", a_: Optional[Any]="<pad>", a_: Union[str, Any]="<mask>", a_: Optional[int]=False, a_: Optional[int]=True, **a_: int, ):
'''simple docstring'''
super().__init__(
a_, a_, tokenizer_file=a_, errors=a_, bos_token=a_, eos_token=a_, sep_token=a_, cls_token=a_, unk_token=a_, pad_token=a_, mask_token=a_, add_prefix_space=a_, trim_offsets=a_, **a_, )
_snake_case : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space:
_snake_case : List[str] = getattr(a_, pre_tok_state.pop("""type""" ) )
_snake_case : int = add_prefix_space
_snake_case : Optional[int] = pre_tok_class(**a_ )
_snake_case : Dict = add_prefix_space
_snake_case : Optional[Any] = """post_processor"""
_snake_case : Any = getattr(self.backend_tokenizer, a_, a_ )
if tokenizer_component_instance:
_snake_case : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_snake_case : Any = tuple(state["""sep"""] )
if "cls" in state:
_snake_case : List[str] = tuple(state["""cls"""] )
_snake_case : List[Any] = False
if state.get("""add_prefix_space""", a_ ) != add_prefix_space:
_snake_case : Dict = add_prefix_space
_snake_case : Any = True
if state.get("""trim_offsets""", a_ ) != trim_offsets:
_snake_case : int = trim_offsets
_snake_case : Tuple = True
if changes_to_apply:
_snake_case : Optional[Any] = getattr(a_, state.pop("""type""" ) )
_snake_case : Tuple = component_class(**a_ )
setattr(self.backend_tokenizer, a_, a_ )
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase_ ( self: List[Any], a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else value
_snake_case : Tuple = value
def UpperCamelCase_ ( self: Optional[int], *a_: Any, **a_: List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Any, *a_: Dict, **a_: Any ):
'''simple docstring'''
_snake_case : Tuple = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : str = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
def UpperCamelCase_ ( self: str, a_: str, a_: str=None ):
'''simple docstring'''
_snake_case : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase_ ( self: Tuple, a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : List[str] = [self.sep_token_id]
_snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 28 |
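# The two methods at the end of the tokenizer above implement RoBERTa's
# special-token layout: <s> A </s> for one sequence and <s> A </s></s> B </s>
# for a pair (note the doubled separator), with all-zero token type ids.
# A plain-Python sketch (0 and 2 are RoBERTa's actual bos/eos ids; the
# payload ids are illustrative):
def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None, bos_id=0, eos_id=2):
    output = [bos_id] + token_ids_0 + [eos_id]  # <s> A </s>
    if token_ids_1 is None:
        return output
    return output + [eos_id] + token_ids_1 + [eos_id]  # <s> A </s></s> B </s>

assert build_inputs_with_special_tokens([7, 8]) == [0, 7, 8, 2]
assert build_inputs_with_special_tokens([7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]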
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_snake_case : Any = []
for num in range(len(snake_case__ ) ):
_snake_case : Optional[int] = 0
while 2 * i * i <= odd_composites[num]:
_snake_case : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(snake_case__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(snake_case__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["note_seq"]
def __init__( self: int, *a_: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(self, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: str, *a_: Union[str, Any], **a_: Any ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: List[str], **a_: Tuple ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
| 28 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
_snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
_snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
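    # Illustration of the parsing above (input string assumed): a prompt like
    # "a smiling face:2|blurry:-1" yields prompts ["a smiling face", "blurry"]
    # with a weights tensor of [2.0, -1.0].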
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 28 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = int(snake_case__ )
# Initialize Result
_snake_case : str = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
            answer.append(snake_case__ )  # append the chosen denomination to the answer list
return answer
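# Worked example of the greedy scan above (denominations assumed): with
# [1, 2, 5, 10] and a total value of 18, the reversed loop picks 10, then
# 5, then 2, then 1, so the answer list comes out as [10, 5, 2, 1].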
# Driver Code
if __name__ == "__main__":
A_ = []
A_ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
A_ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
A_ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
A_ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
A_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_snake_case : Dict = 0
while number:
        # number & (number - 1) clears the lowest set bit, so instead of
        # looping through each bit and checking for 1s, the loop runs once
        # per set bit rather than 32 times
number &= number - 1
count += 1
return count
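# Worked example of the trick above: for 13 (0b1101) the loop computes
# 13 & 12 -> 0b1100, 12 & 11 -> 0b1000, 8 & 7 -> 0, i.e. exactly three
# iterations for the three set bits.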
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 1 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for old_item in old_list:
_snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
_snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
_snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
_snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
_snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
_snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ):
"""simple docstring"""
_snake_case : Dict = []
for old_item in old_list:
_snake_case : Dict = old_item
_snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
_snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case : Union[str, Any] = old_checkpoint[path]
_snake_case : Optional[int] = old_tensor.shape[0] // 3
_snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 )
_snake_case : Union[str, Any] = query.reshape(snake_case__ )
_snake_case : Tuple = key.reshape(snake_case__ )
_snake_case : Any = value.reshape(snake_case__ )
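            # At this point the fused qkv tensor (first dim = 3 * channels) has
            # been reshaped per attention head and split into equal query / key /
            # value chunks before being written under the new checkpoint keys.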
for path in paths:
_snake_case : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case : Optional[Any] = old_checkpoint[path["""old"""]]
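        # A 1x1 Conv1d weight has shape (out, in, 1); slicing off the trailing
        # kernel dimension above yields the (out, in) matrix a Linear layer expects.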
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 28 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
| 28 | 1 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : bool = True , snake_case__ : float = math.inf , snake_case__ : float = -math.inf , snake_case__ : float = math.inf , snake_case__ : float = -math.inf , snake_case__ : bool = False , snake_case__ : float = 1_00 , snake_case__ : float = 0.01 , snake_case__ : float = 1 , ):
"""simple docstring"""
_snake_case : Union[str, Any] = False
_snake_case : List[str] = search_prob
_snake_case : Dict = start_temperate
_snake_case : str = []
_snake_case : Any = 0
_snake_case : str = None
while not search_end:
_snake_case : int = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : List[str] = current_state
scores.append(snake_case__ )
iterations += 1
_snake_case : Union[str, Any] = None
_snake_case : str = current_state.get_neighbors()
while (
next_state is None and neighbors
    ): # until we find a neighbor that we can move to
_snake_case : Tuple = random.randint(0 , len(snake_case__ ) - 1 ) # picking a random neighbor
_snake_case : Dict = neighbors.pop(snake_case__ )
_snake_case : Dict = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Tuple = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : int = picked_neighbor
else:
_snake_case : int = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : List[str] = picked_neighbor
_snake_case : List[str] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Optional[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(snake_case__ ) , snake_case__ )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
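# Note on the acceptance rule above (numbers assumed): a worsening move
# with change = -2 survives with probability e^(-2 / current_temp), about
# 0.98 at a temperature of 100 but only about 0.14 at a temperature of 1,
# so the search grows greedier as the schedule cools.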
if __name__ == "__main__":
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Dict ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
A_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'''and 50 > y > -5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
A_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'''and 50 > y > -5 found via simulated annealing: {local_min.score()}'''
)
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[str] ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
A_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'''{local_min.score()}'''
)
A_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'''{local_min.score()}'''
)
| 28 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
| 28 | 1 |
"""simple docstring"""
import operator as op
A_ = '''scaler.pt'''
A_ = '''pytorch_model'''
A_ = '''random_states'''
A_ = '''optimizer'''
A_ = '''scheduler'''
A_ = '''pytorch_model.bin'''
A_ = '''pytorch_model.bin.index.json'''
A_ = '''model.safetensors'''
A_ = '''model.safetensors.index.json'''
A_ = '''1.10.2'''
A_ = '''py38'''
A_ = '''4.17.0'''
A_ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
A_ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
A_ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
A_ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
A_ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
A_ = '''2.0.1'''
A_ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
A_ = ['''default''', '''reduce-overhead''', '''max-autotune''']
A_ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
A_ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
A_ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
A_ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 28 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
| 28 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    '''Wrapper around the official CUAD v1 evaluation script.'''
    def _info(self):
        '''Declare the metric's input features and reference URLs.'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ), codebase_urls=["""https://www.atticusprojectai.org/cuad"""], reference_urls=["""https://www.atticusprojectai.org/cuad"""], )
    def _compute(self, predictions, references):
        '''Compute CUAD scores from model predictions and gold references.'''
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| 28 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decode `ciphertext` with the repeating `key`, returning None if any decoded character is invalid."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
return decoded
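# A quick round-trip illustration for try_key (hypothetical bytes, not taken from the
# puzzle's data file): XOR is its own inverse, so decoding a ciphertext produced with
# the same key recovers the plaintext.
#
# >>> key = tuple(ord(c) for c in "abc")
# >>> cipher = [p ^ k for p, k in zip([ord(c) for c in "the"], cycle(key))]
# >>> try_key(cipher, key)
# 'the'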
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Brute-force every 3-letter lowercase key and keep decodings made only of valid characters."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate decodings that contain `common_word`."""
    return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
_snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )]
_snake_case : Optional[Any] = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_snake_case : Optional[int] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')
class SegmentTree(Generic[T]):
    '''Iterative (non-recursive) segment tree supporting point update and range query.'''
    def __init__( self, arr: list[T], fnc: Callable[[T, T], T] ):
        '''Build the tree over `arr` using the associative combine function `fnc`.'''
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()
    def build( self ):
        '''Fill the internal nodes bottom-up: node p combines its children 2p and 2p+1.'''
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def update( self, p: int, v: T ):
        '''Set leaf p to v, then recompute the ancestors on the path to the root.'''
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def query( self, l: int, r: int ):  # noqa: E741
        '''Combine the values on the inclusive index range [l, r].'''
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
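# Layout note: the leaves live at indices N..2N-1 of self.st, and internal node p
# covers exactly the union of its children 2p and 2p+1, so update and query are both
# O(log N). A small sanity check (values chosen here only for illustration):
#
# >>> SegmentTree([5, 2, 8], min).query(0, 2)
# 2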
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments():
        """Cross-check every (i, j) range query against a plain reduce over the slice."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 28 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    '''Wraps a TVLT image processor and a TVLT audio feature extractor into one processor.'''
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
    def __init__( self, image_processor, feature_extractor ):
        '''Construct a TVLT processor from an image processor and an audio feature extractor.'''
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs ):
        '''Forward `images` to TvltImageProcessor and `audio` to TvltFeatureExtractor, merging their outputs.'''
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names( self ):
        '''Combined input names of the image processor and the feature extractor, deduplicated.'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
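# Hypothetical usage sketch (the checkpoint name is an assumption, not taken from this
# file): both modalities are optional, but at least one must be provided.
#
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
# inputs = processor(images=video_frames, audio=waveform, sampling_rate=44_100)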
| 28 | 1 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A_ = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint, hf_model, config):
    """Copy the original vocoder generator weights into the Hugging Face model in place."""
    # Weight norm is applied first so the checkpoint's weight_g / weight_v
    # parametrization lines up with the model's parameters; it is removed at the end
    # to fuse the weights. Target attribute names follow SpeechT5HifiGan's module
    # layout (conv_pre, upsampler, resblocks, conv_post).
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[F"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    """Convert an original HiFi-GAN generator checkpoint into a SpeechTaHifiGan model directory."""
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
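# Hypothetical invocation (all paths are placeholders, not taken from this file):
#   python convert_hifigan_checkpoint.py --checkpoint_path generator.ckpt \
#       --stats_path stats.npy --pytorch_dump_folder_path ./speecht5_hifigan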
| 28 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
| 28 | 1 |
"""simple docstring"""
def solution(limit: int = 1_00_00_00 ):
    """Count the reduced proper fractions with denominator <= limit (Project Euler 72).

    Sieve over Euler's totient: phi[i] starts at i - 1; if phi[i] is still i - 1 when
    the loop reaches i, then i is prime, and every multiple j loses phi[j] // i candidates.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
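# Sanity check on a small limit (verifiable by hand): for denominators d <= 8 there
# are 21 reduced proper fractions, and the sieve above reproduces that.
#
# >>> solution(8)
# 21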
if __name__ == "__main__":
print(solution())
| 28 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    '''Abstract base class that every `transformers-cli` subcommand implements.'''
    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        '''Attach this command's argparse subparser to `parser`.'''
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        '''Execute the command.'''
        raise NotImplementedError()
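# A hypothetical concrete command sketch (names assumed for illustration): a subclass
# wires itself into the CLI's argparse tree in `register_subcommand` and does its
# actual work in `run`.
#
# class HelloCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         subparser = parser.add_parser("hello")
#         subparser.set_defaults(func=lambda args: HelloCommand())
#     def run(self):
#         print("hello")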
| 28 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
A_ = False
class lowercase( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""", torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_snake_case : Tuple = torch.manual_seed(0 )
_snake_case : Dict = pipe.dual_guided(
prompt="""first prompt""", image=a_, text_to_image_strength=0.75, generator=a_, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""", ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_snake_case : str = VersatileDiffusionPipeline.from_pretrained(a_, torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : int = generator.manual_seed(0 )
_snake_case : int = pipe.dual_guided(
prompt="""first prompt""", image=a_, text_to_image_strength=0.75, generator=a_, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""", ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""", torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Optional[Any] = """cyberpunk 2077"""
_snake_case : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_snake_case : str = torch.manual_seed(0 )
_snake_case : Tuple = pipe.dual_guided(
prompt=a_, image=a_, text_to_image_strength=0.75, generator=a_, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""", ).images
_snake_case : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : Dict = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_snake_case : Optional[int] = """A painting of a squirrel eating a burger """
_snake_case : Tuple = torch.manual_seed(0 )
_snake_case : Dict = pipe.text_to_image(
prompt=a_, generator=a_, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""" ).images
_snake_case : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : str = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_snake_case : List[Any] = pipe.image_variation(a_, generator=a_, output_type="""numpy""" ).images
_snake_case : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : Any = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 28 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    '''Configuration class for RoFormer models.'''
    model_type = "roformer"
    def __init__( self, vocab_size=50_000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs, ):
        '''Instantiate a RoFormer configuration.'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    '''ONNX export configuration for RoFormer.'''
    @property
    def inputs( self ):
        '''Dynamic-axes mapping for the exported model inputs.'''
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
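# Minimal usage sketch (RoFormerModel is the matching model class in transformers,
# shown here only for illustration):
#
# from transformers import RoFormerModel
# config = RoFormerConfig(rotary_value=True)
# model = RoFormerModel(config)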
| 28 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase( _lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowercase__ = KandinskyVaaPipeline
lowercase__ = [
"image_embeds",
"negative_image_embeds",
]
lowercase__ = ["image_embeds", "negative_image_embeds"]
lowercase__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase__ = False
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_snake_case : Tuple = UNetaDConditionModel(**A__ )
return model
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Any = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.dummy_unet
_snake_case : int = self.dummy_movq
_snake_case : List[Any] = DDIMScheduler(
num_train_timesteps=1_000, beta_schedule="""linear""", beta_start=0.00_085, beta_end=0.012, clip_sample=A__, set_alpha_to_one=A__, steps_offset=1, prediction_type="""epsilon""", thresholding=A__, )
_snake_case : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase_ ( self: Tuple, a_: Optional[Any], a_: List[str]=0 ):
'''simple docstring'''
_snake_case : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(A__ ) ).to(A__ )
_snake_case : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
A__ )
if str(A__ ).startswith("""mps""" ):
_snake_case : Optional[int] = torch.manual_seed(A__ )
else:
_snake_case : str = torch.Generator(device=A__ ).manual_seed(A__ )
_snake_case : Tuple = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = """cpu"""
_snake_case : Optional[int] = self.get_dummy_components()
_snake_case : Union[str, Any] = self.pipeline_class(**A__ )
_snake_case : Tuple = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
_snake_case : str = pipe(**self.get_dummy_inputs(A__ ) )
_snake_case : Union[str, Any] = output.images
_snake_case : List[Any] = pipe(
**self.get_dummy_inputs(A__ ), return_dict=A__, )[0]
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array(
[0.6_237_976, 1.0, 0.36_441_332, 1.0, 0.70_639_634, 0.29_877_186, 0.85_652_125, 0.5_216_843, 0.54_454_046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
_snake_case : Any = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.floataa )
pipe_prior.to(A__ )
_snake_case : Optional[int] = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""", torch_dtype=torch.floataa )
_snake_case : Optional[int] = pipeline.to(A__ )
pipeline.set_progress_bar_config(disable=A__ )
_snake_case : Dict = """red cat, 4k photo"""
_snake_case : str = torch.Generator(device="""cuda""" ).manual_seed(0 )
_snake_case , _snake_case : Union[str, Any] = pipe_prior(
A__, generator=A__, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
_snake_case : List[str] = torch.Generator(device="""cuda""" ).manual_seed(0 )
_snake_case : str = pipeline(
image_embeds=A__, negative_image_embeds=A__, generator=A__, num_inference_steps=100, output_type="""np""", )
_snake_case : str = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A__, A__ )
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1 ):
    """Remove (positive n) or keep only (negative n) the first dot-separated segments of `path`."""
    if n_shave_prefix_segments >= 0:
        return """.""".join(path.split(""".""" )[n_shave_prefix_segments:] )
    else:
        return """.""".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list, n_shave_prefix_segments=0 ):
    """Map resnet parameter names from the LDM checkpoint to their diffusers equivalents."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("""in_layers.0""" , """norm1""" )
        new_item = new_item.replace("""in_layers.2""" , """conv1""" )
        new_item = new_item.replace("""out_layers.0""" , """norm2""" )
        new_item = new_item.replace("""out_layers.3""" , """conv2""" )
        new_item = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item = new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0 ):
    """Map attention parameter names from the LDM checkpoint to their diffusers equivalents."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item = new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
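# Mapping sketch (the default n_shave_prefix_segments=0 leaves the prefix intact):
# >>> renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"])
# [{'old': 'input_blocks.1.0.in_layers.0.weight', 'new': 'input_blocks.1.0.norm1.weight'}]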
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ):
    """Copy tensors from `old_checkpoint` into `checkpoint` under their renamed keys."""
    assert isinstance(paths, list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] = query.reshape(target_shape )
            checkpoint[path_map["""key"""]] = key.reshape(target_shape )
            checkpoint[path_map["""value"""]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["""new"""]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["""old"""] , replacement["""new"""] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["""old"""]]
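# Shape note for the qkv split above: a fused attention tensor of shape (3 * C, ...)
# is reshaped to (num_heads, 3 * C / num_heads, ...) and split into equal query / key /
# value chunks along dim 1 before each chunk is flattened back to its target shape.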
def convert_ldm_checkpoint(checkpoint, config ):
    """Convert an LDM UNet state dict into the diffusers UNet2DModel layout."""
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
            to_split = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config["""num_res_blocks"""] + 1)
        layer_in_block_id = i % (config["""num_res_blocks"""] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split(""".""" )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
                to_split = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=config , )
else:
            resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = """.""".join(["""output_blocks""", str(i ), path["""old"""]] )
                new_path = """.""".join(["""up_blocks""", str(block_id ), """resnets""", str(layer_in_block_id ), path["""new"""]] )
                new_checkpoint[new_path] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 28 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    '''Configuration class for LXMERT models.'''
    model_type = "lxmert"
    attribute_map = {}
def __init__( self: int, a_: Any=30_522, a_: List[str]=768, a_: Union[str, Any]=12, a_: List[Any]=9_500, a_: Any=1_600, a_: Union[str, Any]=400, a_: Tuple=3_072, a_: Dict="gelu", a_: Tuple=0.1, a_: Tuple=0.1, a_: int=512, a_: List[str]=2, a_: List[str]=0.02, a_: str=1E-12, a_: str=9, a_: int=5, a_: Optional[int]=5, a_: List[Any]=2_048, a_: Any=4, a_: Dict=6.67, a_: Any=True, a_: Union[str, Any]=True, a_: Any=True, a_: Tuple=True, a_: Optional[Any]=True, a_: Optional[int]=True, a_: Tuple=True, **a_: List[Any], ):
'''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs )
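

# Hypothetical usage sketch (not part of the original module):
#   cfg = LxmertConfig()
#   cfg.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}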
| 701 |
"""simple docstring"""
from typing import Any
def mode(input_list: list ) -> list[Any]:
    """Return the mode(s) of ``input_list`` in sorted order."""
    if not input_list:
        return []
    counts = [input_list.count(value ) for value in input_list]
    max_count = max(counts )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts ) if value == max_count} )
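

# Quick sanity checks (an illustrative addition; the expected values follow
# directly from the implementation above):
assert mode([2, 2, 3] ) == [2]
assert mode([1, 1, 2, 2] ) == [1, 2]
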
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N ) -> int:
    """Find the greatest product of thirteen adjacent digits in ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
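

# Illustrative sketch of the same reduce-based digit product on a short window
# (the four-digit string is an example, not taken from the puzzle input):
assert int(reduce(lambda x, y: str(int(x ) * int(y ) ) , "9989" ) ) == 5832
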
if __name__ == "__main__":
print(F'''{solution() = }''')
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_vision_model"

    def __init__( self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1E-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ):
'''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
    def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ):
'''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower_text_model"

    def __init__( self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
'''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
    def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ):
'''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class BridgeTowerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "bridgetower"

    def __init__( self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1E-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
'''simple docstring'''
        # These legacy kwargs are accepted but ignored.
        _ = kwargs.pop("""text_config_dict""", None )
        _ = kwargs.pop("""vision_config_dict""", None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
@classmethod
    def from_text_vision_configs( cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs ):
'''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs )
    def to_dict( self ):
'''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
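

# Hypothetical usage sketch (not part of the original module): composing the
# full config from explicit sub-configs.
#   text = BridgeTowerTextConfig()
#   vision = BridgeTowerVisionConfig()
#   full = BridgeTowerConfig.from_text_vision_configs(text, vision)
#   full.to_dict()  # serializes both sub-configs plus the model_type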
| 28 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys(config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
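

# Minimal sketch (illustrative only) of how the (src, dest) pairs above are
# consumed, mirroring the rename_key helper below:
_demo_sd = {"""ln_vision.weight""": 0}
_demo_sd["""vision_model.post_layernorm.weight"""] = _demo_sd.pop("""ln_vision.weight""" )
assert """ln_vision.weight""" not in _demo_sd
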
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
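

# Sanity sketch (illustrative names) for the fused bias layout built above: for
# hidden size H the result is [q (H) | zeros (H) | v (H)], i.e. 3 * H entries.
_q_demo, _v_demo = torch.ones(4 ), torch.full((4,), 2.0 )
assert torch.cat((_q_demo, torch.zeros_like(_v_demo ), _v_demo) ).numel() == 12
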
def get_blipa_config(model_name , eos_token_id=None ):
    """simple docstring"""
    image_size = 3_64 if """coco""" in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ["input_features", "is_longer"]

    def __init__( self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs, ):
'''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="""htk""", )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="""slaney""", mel_scale="""slaney""", )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""feature_extractor_type"""] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self, waveform: np.array, mel_filters: Optional[np.array] = None ):
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, """hann""" ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="""dB""", )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self, mel, total_frames, chunk_frames ):
        '''simple docstring'''
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="""bilinear""", align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
        return mel_fusion
    def _get_input_mel( self, waveform: np.array, max_length, truncation, padding ):
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0, overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented" )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform, n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform, n_repeat ) )
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="""constant""", constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        '''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float64 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list ):
            input_mel = [np.asarray(feature, dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
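

# Hypothetical usage sketch (illustrative; assumes 48 kHz mono audio, matching
# the defaults configured above):
#   extractor = ClapFeatureExtractor()
#   feats = extractor(np.zeros(48_000), sampling_rate=48_000, return_tensors="np")
#   feats["input_features"]  # a (batch, 4, frames, 64) log-mel stack in "fusion" mode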
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file ):
    """simple docstring"""
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )

    config = argparse.Namespace()
    with open(orig_cfg_file , """r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(orig_cfg_file , str(exc ) ) )
    return config
def get_mobilevitva_config(task_name , orig_cfg_file ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
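

# Minimal sketch of `rename_key` on a toy dict (illustrative only):
_demo = {"""old""": 0}
rename_key(_demo , """old""" , """new""" )
assert _demo == {"""new""": 0}
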
def create_rename_keys(state_dict , base_model=False ):
    """simple docstring"""
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""" , """.normalization.""" )
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""" , f"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""" , """classifier.""" )
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""" , """.""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys(state_dict ):
    """simple docstring"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
# remove and rename some keys of load the original model
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
        _snake_case : List[str] = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
'''simple docstring'''
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["""sentence_order_label"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head_absolute_embedding( self ):
        '''simple docstring'''
        model = AlbertModel.from_pretrained("""albert-base-v2""" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ) )
| 705 |
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def test_find_backend(self):
        '''simple docstring'''
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        '''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        '''simple docstring'''
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        '''simple docstring'''
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
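    # Note on what the assertions above guarantee: a user without torch installed
    # can still import diffusers and reference these dummy names; only *using*
    # them triggers requires_backends, which raises with an actionable install
    # hint instead of an opaque ModuleNotFoundError.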
| 28 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one list position: repeatedly trades values with its neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):  # 10 == len(arr) in main() below
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
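# For contrast, a minimal single-process odd-even transposition using the same
# swap schedule as the workers above (len(arr) phases guarantee sortedness).
# This sequential sketch is an illustration only, not part of the parallel pipeline:
def odd_even_transposition_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        start = 0 if phase % 2 == 0 else 1  # even phase: pairs (0,1),(2,3)...; odd phase: (1,2),(3,4)...
        for j in range(start, len(arr) - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr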
def odd_even_transposition(arr):
    """Sorts ``arr`` by spawning one process per element."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
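# Anything listed in _import_structure above is resolved lazily by the
# _LazyModule installed at the bottom of this file; a hedged usage sketch:
#
#   from transformers.models.owlvit import OwlViTProcessor  # the real submodule import happens only here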
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowercase( PretrainedConfig ):
    '''simple docstring'''

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0_006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
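    # A quick usage sketch (hypothetical values): thanks to attribute_map above,
    # the generic transformer names alias the GPT-style ones.
    #
    #   config = lowercase(n_layer=3, n_head=2)   # class name as defined in this file
    #   assert config.num_hidden_layers == 3      # resolved through attribute_map to n_layer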
| 707 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: run the wrapped forward fn eagerly or as an (optionally XLA-compiled) tf.function."""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """Builds a (batch_size, sequence_length) int32 tensor of random token ids."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
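# A hedged sketch of how the two helpers above combine in the benchmark class
# below (`model` is a placeholder, not defined at module level):
#
#   input_ids = random_input_ids(batch_size=2, sequence_length=8, vocab_size=100)  # shape (2, 8)
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids)  # traced once into an XLA-compiled tf.function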
class lowercase( Benchmark ):
    '''simple docstring'''

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 0 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    """Writes the given articles to ``path``."""
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase( TestCasePlus ):
'''simple docstring'''
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
_snake_case : Optional[int] = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
_snake_case : List[str] = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(a_, a_ )
_snake_case : Union[str, Any] = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
_snake_case : Tuple = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
_snake_case : int = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
with patch.object(a_, """argv""", a_ ):
run_generate()
assert Path(a_ ).exists()
# os.remove(Path(output_file_name))
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
self.run_eval_tester(a_ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def UpperCamelCase_ ( self: Any, a_: Optional[Any] ):
'''simple docstring'''
self.run_eval_tester(a_ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def UpperCamelCase_ ( self: List[str], a_: Tuple ):
'''simple docstring'''
_snake_case : Any = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
_snake_case : Optional[Any] = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
_snake_case : Union[str, Any] = {
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
_snake_case : List[str] = Path(self.get_auto_remove_tmp_dir() )
_snake_case : Any = str(tmp_dir / """scores.json""" )
_snake_case : Union[str, Any] = str(tmp_dir / """val.target""" )
_dump_articles(a_, text["""en"""] )
_dump_articles(a_, text["""de"""] )
_snake_case : Tuple = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
_snake_case : str = f"\n run_eval_search.py\n {model}\n {str(a_ )}\n {str(a_ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(a_, """argv""", a_ ):
with CaptureStdout() as cs:
run_search()
_snake_case : List[Any] = [''' num_beams | length_penalty''', model, '''Best score args''']
_snake_case : List[Any] = ['''Info''']
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(a_ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(a_ ).exists()
os.remove(Path(a_ ) )
| 708 |
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily makes change for ``value`` using the largest denominations first."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Use this denomination while it still fits in the remaining value
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array
    return answer
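# A quick illustration of the greedy strategy (hypothetical values, not part of
# the driver below): making change for 93 with the Indian denominations picks
# the largest coin that still fits at every step.
#
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "93")
#   -> [50, 20, 20, 2, 1]
#
# Note the greedy approach is only guaranteed optimal for canonical coin
# systems such as this one.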
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 | 0 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """simple docstring"""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
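# Why newline_sep matters for rougeLsum: the scorer computes summary-level LCS
# per sentence and detects sentence boundaries via "\n". Joining multi-sentence
# CNN/DM-style summaries with newlines therefore raises the rougeLsum score,
# which is exactly what the assertion above checks.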
def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    """simple docstring"""
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    """simple docstring"""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    no_aggregation_metrics = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(no_aggregation_metrics, defaultdict)
| 709 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
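# The helper class above follows the standard transformers tester pattern: it
# builds a tiny config and random inputs so the checks below run in seconds on
# CPU. A hedged sketch of what a single check boils down to:
#
#   tester = <the tester class above>(parent=self)
#   config, pixel_values, labels, pixel_labels = tester.prepare_config_and_inputs()
#   tester.create_and_check_model(config, pixel_values, labels, pixel_labels)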
@require_torch
class lowercase( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
| 28 | 0 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 710 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase( SchedulerCommonTest ):
    '''simple docstring'''

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
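# A hedged sketch of the denoising loop that full_loop above exercises
# (`model` and `sample` are placeholders, not defined in this file):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample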
| 28 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class lowercase( PretrainedConfig ):
    '''simple docstring'''

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50_257,
        num_wordpiece_labels=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
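    # Usage sketch (hypothetical sizes): MGP-STR fuses three recognition heads,
    # one per granularity configured above.
    #
    #   config = lowercase(num_character_labels=38, num_bpe_labels=50_257, num_wordpiece_labels=30_522)
    #   # character / BPE / wordpiece logits are each sized by one of these vocabularies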
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks primality in O(sqrt(n)) time via the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are in the format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
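# Example of the 6k +/- 1 walk above: for number = 97, sqrt(97) ~ 9.8, so the
# loop runs for i = 5 only and tests divisibility by 5 and 7 before returning
# True -- two modulo checks instead of trial division by every odd number.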
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first ``n`` odd composites that cannot be written as prime + 2*i**2."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # no decomposition found: this odd composite violates the conjecture
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    """Returns the first counterexample to Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class lowercase( UpperCamelCase_ ):
'''simple docstring'''
lowercase__ = """open-llama"""
def __init__( self: Optional[int], a_: Union[str, Any]=100_000, a_: str=4_096, a_: Dict=11_008, a_: Union[str, Any]=32, a_: List[str]=32, a_: List[Any]="silu", a_: str=2_048, a_: Tuple=0.02, a_: List[Any]=1E-6, a_: Tuple=True, a_: Any=0, a_: str=1, a_: str=2, a_: Tuple=False, a_: str=True, a_: Union[str, Any]=0.1, a_: List[str]=0.1, a_: int=True, a_: Any=True, a_: str=None, **a_: List[str], ):
'''simple docstring'''
_snake_case : Optional[Any] = vocab_size
_snake_case : Tuple = max_position_embeddings
_snake_case : str = hidden_size
_snake_case : Tuple = intermediate_size
_snake_case : Tuple = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Any = initializer_range
_snake_case : List[str] = rms_norm_eps
_snake_case : str = use_cache
_snake_case : Union[str, Any] = kwargs.pop(
"""use_memorry_efficient_attention""", a_ )
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Dict = attention_dropout_prob
_snake_case : Dict = use_stable_embedding
_snake_case : Union[str, Any] = shared_input_output_embedding
_snake_case : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, tie_word_embeddings=a_, **a_, )
    def _rope_scaling_validation( self ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("""type""", None )
        rope_scaling_factor = self.rope_scaling.get("""factor""", None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 712 |
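# Sketch (ours): the `_rope_scaling_validation` hook above rejects malformed
# `rope_scaling` dictionaries at config-construction time. The accepted shape,
# demonstrated standalone without any transformers import:
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"type must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"factor must be a float > 1, got {factor}")


validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently
# validate_rope_scaling({"type": "linear", "factor": 1})  # would raise: factor must be a float > 1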
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    '''simple docstring'''

    def __init__( self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14" ):
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )

    def preprocess_img( self, images ):
        '''simple docstring'''
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images

    def __call__( self, text=None, images=None, **kwargs ):
        '''simple docstring'''
        encoding = self.tokenizer(text=text, **kwargs )
        encoding["""pixel_values"""] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class lowercase( nn.Module ):
'''simple docstring'''
    def __init__( self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ):
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation( self, input_path=None, output_path=None, total_duration=5, extend_frames=True ):
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = """./animation.gif"""
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + """/*""" ) )
        if not len(paths ):
            raise ValueError(
                """No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
                """ function?)""" )
        if len(paths ) == 1:
            print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(""".png""" ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path, images, duration=durations )
        print(f"gif saved to {output_path}" )
    def _get_latent( self, path=None, img=None ):
        '''simple docstring'''
        if not (path or img):
            raise ValueError("""Input either path or tensor""" )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ), target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z, *_ = self.vqgan.encode(img )
        return z

    def _add_vector( self, transform_vector ):
        '''simple docstring'''
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self, prompts, image, weights=None ):
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="""pt""", padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss( self, pos_prompts, neg_prompts, image ):
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts["""prompts"""], image, weights=(1 / pos_prompts["""weights"""]) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["""prompts"""], image, weights=neg_prompts["""weights"""] )
        else:
            neg_logits = torch.tensor([1], device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self, original_img, pos_prompts, neg_prompts ):
        '''simple docstring'''
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device )
        optim = torch.optim.Adam([vector], lr=self.lr )

        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img )
            print("""CLIP loss""", clip_loss )
            if self.log:
                wandb.log({"""CLIP Loss""": clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self, positive_prompts, negative_prompts, image_path ):
        '''simple docstring'''
        wandb.init(reinit=True, project="""face-editor""" )
        wandb.config.update({"""Positive Prompts""": positive_prompts} )
        wandb.config.update({"""Negative Prompts""": negative_prompts} )
        wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log("""Original Image""", wandb.Image(image ) )
    def process_prompts( self, prompts ):
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str ):
            prompts = [prompt.strip() for prompt in prompts.split("""|""" )]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(""":""" )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device ),
        }
    def generate( self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ):
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device )
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path )

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + """_""" + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print("""Original Image""" )
            show_pil(custom_to_pil(original_img ) )

        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
            if self.log:
                wandb.log({"""Image""": wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
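# Sketch (ours): `process_prompts` above accepts either a list or a single
# "|"-separated string, with an optional ":weight" suffix per prompt. A
# standalone illustration of the same convention (assumes at most one ":"
# per prompt, like the source):
def parse_prompt(prompt: str):
    if ":" in prompt:
        text, weight = prompt.split(":")
        return text, float(weight)
    return prompt, 1.0


prompts = "a red house:0.8 | oil painting | photo:-0.5"
print([parse_prompt(p.strip()) for p in prompts.split("|")])
# [('a red house', 0.8), ('oil painting', 1.0), ('photo', -0.5)]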
"""simple docstring"""
import numpy as np
class lowercase:
'''simple docstring'''
    def __init__( self, red=None, green=None, blue=None, red_edge=None, nir=None ):
        '''simple docstring'''
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir )
    def set_matricies( self, red=None, green=None, blue=None, red_edge=None, nir=None ):
        '''simple docstring'''
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def UpperCamelCase_ ( self: List[Any], a_: List[str]="", a_: Tuple=None, a_: int=None, a_: Optional[int]=None, a_: List[str]=None, a_: str=None ):
'''simple docstring'''
self.set_matricies(red=a_, green=a_, blue=a_, red_edge=a_, nir=a_ )
        funcs = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
    def arvaa( self ):
        '''simple docstring'''
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci( self ):
        '''simple docstring'''
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi( self ):
        '''simple docstring'''
        return self.nir * (self.red / (self.green**2))

    def gli( self ):
        '''simple docstring'''
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi( self ):
        '''simple docstring'''
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi( self ):
        '''simple docstring'''
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi( self ):
        '''simple docstring'''
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi( self ):
        '''simple docstring'''
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi( self, x=0.08, a=1.22, b=0.03 ):
        '''simple docstring'''
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        '''simple docstring'''
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green( self ):
        '''simple docstring'''
        return (self.nir / self.green) - 1

    def ci_rededge( self ):
        '''simple docstring'''
        return (self.nir / self.redEdge) - 1

    def ci( self ):
        '''simple docstring'''
        return (self.red - self.blue) / self.red

    def ctvi( self ):
        '''simple docstring'''
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))

    def gdvi( self ):
        '''simple docstring'''
        return self.nir - self.green

    def evi( self ):
        '''simple docstring'''
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi( self ):
        '''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi( self, y=0.16 ):
        '''simple docstring'''
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi( self, n=0.5 ):
        '''simple docstring'''
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue( self ):
        '''simple docstring'''
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )

    def ivi( self, a=None, b=None ):
        '''simple docstring'''
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        '''simple docstring'''
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i( self ):
        '''simple docstring'''
        return (self.red + self.green + self.blue) / 30.5

    def rvi( self ):
        '''simple docstring'''
        return self.nir / self.red

    def mrvi( self ):
        '''simple docstring'''
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi( self ):
        '''simple docstring'''
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g( self ):
        '''simple docstring'''
        return self.green / (self.nir + self.red + self.green)

    def norm_nir( self ):
        '''simple docstring'''
        return self.nir / (self.nir + self.red + self.green)

    def norm_r( self ):
        '''simple docstring'''
        return self.red / (self.nir + self.red + self.green)

    def ngrdi( self ):
        '''simple docstring'''
        return (self.green - self.red) / (self.green + self.red)

    def ri( self ):
        '''simple docstring'''
        return (self.red - self.green) / (self.red + self.green)

    def s( self ):
        '''simple docstring'''
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value

    def _if( self ):
        '''simple docstring'''
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi( self ):
        '''simple docstring'''
        return self.nir / self.red

    def tvi( self ):
        '''simple docstring'''
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre( self ):
        '''simple docstring'''
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 713 |
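# Sketch (ours): a quick standalone NDVI computation matching the `ndvi`
# method in the class above, using plain numpy arrays as the two bands:
import numpy as np

nir = np.array([[0.60, 0.55], [0.70, 0.65]])  # near-infrared reflectance
red = np.array([[0.10, 0.20], [0.15, 0.05]])  # red reflectance

ndvi = (nir - red) / (nir + red)
print(ndvi.round(3))  # values near 1 indicate dense, healthy vegetation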
"""simple docstring"""
def count_set_bits(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the
        # loop won't run 32 times, it only runs once per `1` bit
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
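# Sketch (ours): `number &= number - 1` is Brian Kernighan's trick -- it clears
# the lowest set bit, so the loop above runs once per set bit. Cross-checking
# against Python's built-in binary representation:
def popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count


for n in (0, 1, 0b1011, 255, 2**20 - 1):
    assert popcount_kernighan(n) == bin(n).count("1")
print("ok")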
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
    def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ):
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids, use_cache=False )

        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )

        past_key_values = outputs["""past_key_values"""]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1 ) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1 )

        output_from_no_past = model(next_input_ids )["""last_hidden_state"""]
        output_from_past = model(next_tokens, past_key_values=past_key_values )["""last_hidden_state"""]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3 )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_torch
class lowercase( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ = True
lowercase__ = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False )
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_decoder_model_past( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
| 714 |
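# Sketch (ours, not the TrOCR classes themselves): `create_and_check_decoder_model_past`
# above asserts that incremental decoding with `past_key_values` reproduces the logits
# of a full forward pass. The same property, shown with a tiny randomly initialized
# GPT-2 so no pretrained weights are needed:
import torch
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(vocab_size=100, n_positions=32, n_embd=32, n_layer=2, n_head=2)
model = GPT2LMHeadModel(config).eval()

input_ids = torch.randint(1, config.vocab_size, (1, 8))
next_token = torch.randint(1, config.vocab_size, (1, 1))

with torch.no_grad():
    # full forward over the extended sequence...
    full = model(torch.cat([input_ids, next_token], dim=-1)).logits
    # ...versus one incremental step reusing the cache
    past = model(input_ids, use_cache=True).past_key_values
    step = model(next_token, past_key_values=past).logits

assert torch.allclose(full[:, -1], step[:, -1], atol=1e-4)
print("cached and uncached logits match")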
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        '''simple docstring'''
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def create_and_check_backbone( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ), 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ), 1 )
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class lowercase( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue

            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names )

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''

        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ), expected_num_stages + 1 )

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )

    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(torch_device )

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="""pt""" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, expected_shape )

        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
| 28 | 0 |
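# Sketch (ours): several tests above assert that a model's forward signature
# starts with `pixel_values`. The same introspection works on any callable:
import inspect


def first_forward_args(forward_fn, n=1):
    signature = inspect.signature(forward_fn)
    # signature.parameters is ordered, so the slice is deterministic
    return [name for name in signature.parameters if name != "self"][:n]


class Dummy:
    def forward(self, pixel_values, labels=None):
        return pixel_values


assert first_forward_args(Dummy.forward) == ["pixel_values"]
print("ok")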
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
A_ = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, """schedulers/""" ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, """src/diffusers/schedulers/scheduling_ddpm.py""" ), os.path.join(self.diffusers_dir, """schedulers/scheduling_ddpm.py""" ), )

    def tearDown( self ):
        '''simple docstring'''
        check_copies.DIFFUSERS_PATH = """src/diffusers"""
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self, comment, class_name, class_code, overwrite_result=None ):
        '''simple docstring'''
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            result = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119 )
        code = black.format_str(code, mode=mode )
        fname = os.path.join(self.diffusers_dir, """new_code.py""" )
        with open(fname, """w""", newline="""\n""" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True )
            with open(fname, """r""" ) as f:
                self.assertTrue(f.read(), result )
    def test_find_code_in_diffusers( self ):
        '''simple docstring'''
        code = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
        self.assertEqual(code, REFERENCE_CODE )
    def test_is_copy_consistent( self ):
        '''simple docstring'''
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""", """DDPMSchedulerOutput""", REFERENCE_CODE + """\n""", )

        # With no empty line at the end
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""", """DDPMSchedulerOutput""", REFERENCE_CODE, )

        # Copy consistency with rename
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""", """TestSchedulerOutput""", re.sub("""DDPM""", """Test""", REFERENCE_CODE ), )

        # Copy consistency with a really long name
        long_class_name = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("""DDPM""", long_class_name, REFERENCE_CODE ), )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""", """TestSchedulerOutput""", REFERENCE_CODE, overwrite_result=re.sub("""DDPM""", """Test""", REFERENCE_CODE ), )
| 715 |
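# Sketch (ours): the helper above round-trips generated code through `black`
# before comparing it, so pure formatting differences never cause false
# mismatches. Minimal standalone use of the same API:
import black

messy = "def f( x ):\n    return x+1\n"
mode = black.Mode(line_length=119)
print(black.format_str(messy, mode=mode))
# def f(x):
#     return x + 1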
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """simple docstring"""
    assert isinstance(dataset, Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset, expected_features )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset, expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split ).read()
    _check_parquet_dataset(dataset, expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """simple docstring"""
    if issubclass(path_type, str ):
        path = parquet_path
    elif issubclass(path_type, list ):
        path = [parquet_path]
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset, expected_features )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"""train""": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset, expected_features )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"""train""": parquet_path}, features=features, cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset, expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """simple docstring"""
    if split:
        path = {split: parquet_path}
    else:
        split = """train"""
        path = {"""train""": parquet_path, """test""": parquet_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    """simple docstring"""
    writer = ParquetDatasetWriter(dataset, tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / """foo.parquet""" )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """simple docstring"""
    image_path = str(shared_datadir / """test_image_rgb.jpg""" )
    data = {"""image""": [image_path]}
    features = Features({"""image""": Image()} )
    dataset = Dataset.from_dict(data, features=features )
    writer = ParquetDatasetWriter(dataset, tmp_path / """foo.parquet""" )
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ), streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    """simple docstring"""
    assert get_writer_batch_size(feature ) == expected
| 28 | 0 |
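# Sketch (ours): the reader/writer classes exercised above back the public
# `Dataset.to_parquet` / `Dataset.from_parquet` round trip:
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
ds.to_parquet("demo.parquet")
reloaded = Dataset.from_parquet("demo.parquet")
assert reloaded.column_names == ds.column_names and reloaded.num_rows == 4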
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
SAMPLE_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut( self ):
        '''simple docstring'''
        config = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(config, WavaVecaFeatureExtractor )

    def test_feature_extractor_from_local_directory_from_key( self ):
        '''simple docstring'''
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        self.assertIsInstance(config, WavaVecaFeatureExtractor )

    def test_feature_extractor_from_local_directory_from_config( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR ).to_dict()

            config_dict.pop("""feature_extractor_type""" )
            config = WavaVecaFeatureExtractor(**config_dict )

            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )

            config = AutoFeatureExtractor.from_pretrained(tmpdirname )

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )

            self.assertIsInstance(config, WavaVecaFeatureExtractor )

    def test_feature_extractor_from_local_file( self ):
        '''simple docstring'''
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG )
        self.assertIsInstance(config, WavaVecaFeatureExtractor )
    def test_repo_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, """bert-base is not a local folder and is not a valid model identifier""" ):
            _ = AutoFeatureExtractor.from_pretrained("""bert-base""" )

    def test_revision_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="""aaaaaa""" )

    def test_feature_extractor_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""", ):
            _ = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
    def test_from_pretrained_dynamic_feature_extractor( self ):
        '''simple docstring'''
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""", trust_remote_code=False )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""", trust_remote_code=True )
        self.assertEqual(feature_extractor.__class__.__name__, """NewFeatureExtractor""" )

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir )
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, """NewFeatureExtractor""" )
    def test_new_feature_extractor_registration( self ):
        '''simple docstring'''
        try:
            AutoConfig.register("""custom""", CustomConfig )
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor )

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir )
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor )

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict( self ):
        '''simple docstring'''

        class NewFeatureExtractor( WavaVecaFeatureExtractor ):
            '''simple docstring'''
            is_local = True

        try:
            AutoConfig.register("""custom""", CustomConfig )
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor )
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__, """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""", trust_remote_code=False )
            self.assertEqual(feature_extractor.__class__.__name__, """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""", trust_remote_code=True )
            self.assertEqual(feature_extractor.__class__.__name__, """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(feature_extractor, """is_local""" ) )

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 716 |
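# Sketch (ours): the registration pattern exercised above, reduced to its core.
# `MyConfig` / `MyFeatureExtractor` are illustrative names, not real transformers classes:
from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyFeatureExtractor(FeatureExtractionMixin):
    pass


AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
# AutoFeatureExtractor.from_pretrained(...) now resolves "my-model"
# checkpoints to MyFeatureExtractor.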
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
        '''simple docstring'''
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )

    def get_config( self ):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )

    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
| 28 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : str = 0 ):
"""simple docstring"""
_snake_case : List[str] = length or len(_lowerCamelCase )
_snake_case : str = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
_snake_case : Optional[Any] = list_data[i + 1], list_data[i]
_snake_case : List[Any] = True
return list_data if not swapped else bubble_sort(_lowerCamelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ = [ord(letter) for letter in string.ascii_lowercase]
A_ = {ord(char) for char in VALID_CHARS}
A_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ):
"""simple docstring"""
_snake_case : str = ""
_snake_case : int
_snake_case : int
_snake_case : int
for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ):
_snake_case : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case__ )
return decoded
def UpperCAmelCase__ (snake_case__ : list[int] ):
"""simple docstring"""
_snake_case : list[str] = []
for key in product(snake_case__ , repeat=3 ):
_snake_case : List[Any] = try_key(snake_case__ , snake_case__ )
if encoded is not None:
possibles.append(snake_case__ )
return possibles
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
_snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )]
_snake_case : Optional[Any] = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_snake_case : Optional[int] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowercase( _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "umt5"
lowercase__ = ["past_key_values"]
def __init__( self: Any, a_: Union[str, Any]=250_112, a_: Tuple=512, a_: List[Any]=64, a_: str=1_024, a_: List[str]=8, a_: Dict=None, a_: Optional[int]=6, a_: int=32, a_: Dict=128, a_: List[Any]=0.1, a_: List[Any]=1E-6, a_: List[str]=1.0, a_: Union[str, Any]="gated-gelu", a_: Union[str, Any]=True, a_: Dict=True, a_: Tuple="T5Tokenizer", a_: List[str]=True, a_: int=0, a_: int=1, a_: int=0, **a_: Optional[Any], ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=lowerCamelCase_, tokenizer_class=lowerCamelCase_, tie_word_embeddings=lowerCamelCase_, pad_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, decoder_start_token_id=lowerCamelCase_, **lowerCamelCase_, )
_snake_case : List[str] = vocab_size
_snake_case : List[Any] = d_model
_snake_case : List[Any] = d_kv
_snake_case : Optional[int] = d_ff
_snake_case : List[str] = num_layers
_snake_case : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_snake_case : Dict = num_heads
_snake_case : int = relative_attention_num_buckets
_snake_case : str = relative_attention_max_distance
_snake_case : str = dropout_rate
_snake_case : int = layer_norm_epsilon
_snake_case : Optional[Any] = initializer_factor
_snake_case : List[str] = feed_forward_proj
_snake_case : List[str] = use_cache
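# `feed_forward_proj` is either a plain activation name (e.g. "relu") or a
# gated variant prefixed with "gated-" (e.g. "gated-gelu"); splitting on "-"
# recovers the dense activation function and the gating flag.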
_snake_case : Any = self.feed_forward_proj.split("""-""" )
_snake_case : Tuple = act_info[-1]
_snake_case : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""\'gated-gelu\' or \'relu\'""" )
if feed_forward_proj == "gated-gelu":
_snake_case : Any = '''gelu_new'''
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return self.d_model
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return self.num_heads
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return self.num_layers
class lowercase( _UpperCAmelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
_snake_case : int = '''past_encoder_sequence + sequence'''
_snake_case : str = {0: '''batch'''}
_snake_case : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_snake_case : Tuple = {0: '''batch''', 1: '''decoder_sequence'''}
_snake_case : List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_, direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return 13
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return 5E-4
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "feature_extractor"]
lowercase__ = "TvltImageProcessor"
lowercase__ = "TvltFeatureExtractor"
def __init__( self: Dict, a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=a_, feature_extractor=a_ )
_snake_case : Any = image_processor
_snake_case : Dict = feature_extractor
def __call__( self: int, a_: str=None, a_: Tuple=None, a_: Dict=None, a_: str=None, a_: Optional[int]=False, a_: Tuple=False, *a_: List[str], **a_: int, ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
_snake_case : Optional[int] = None
if images is not None:
_snake_case : Tuple = self.image_processor(a_, mask_pixel=a_, *a_, **a_ )
if images_mixed is not None:
_snake_case : Optional[int] = self.image_processor(a_, is_mixed=a_, *a_, **a_ )
if audio is not None:
_snake_case : Any = self.feature_extractor(
a_, *a_, sampling_rate=a_, mask_audio=a_, **a_ )
_snake_case : List[str] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 28 | 0 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class lowercase( _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = 'xlm-prophetnet'
lowercase__ = ['past_key_values']
lowercase__ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self: Optional[Any], a_: Optional[float] = 0.1, a_: Optional[Union[str, Callable]] = "gelu", a_: Optional[int] = 30_522, a_: Optional[int] = 1_024, a_: Optional[int] = 4_096, a_: Optional[int] = 12, a_: Optional[int] = 16, a_: Optional[int] = 4_096, a_: Optional[int] = 12, a_: Optional[int] = 16, a_: Optional[float] = 0.1, a_: Optional[float] = 0.1, a_: Optional[int] = 512, a_: Optional[float] = 0.02, a_: Optional[bool] = True, a_: Optional[bool] = True, a_: Optional[int] = 0, a_: Optional[int] = 2, a_: Optional[int] = 32, a_: Optional[int] = 128, a_: Optional[bool] = False, a_: Optional[float] = 0.0, a_: Optional[bool] = True, a_: Optional[int] = 0, a_: Optional[int] = 1, a_: Optional[int] = 2, **a_: Tuple, ):
'''simple docstring'''
_snake_case : Tuple = vocab_size
_snake_case : Union[str, Any] = hidden_size
_snake_case : List[str] = encoder_ffn_dim
_snake_case : List[str] = num_encoder_layers
_snake_case : str = num_encoder_attention_heads
_snake_case : List[str] = decoder_ffn_dim
_snake_case : Union[str, Any] = num_decoder_layers
_snake_case : Dict = num_decoder_attention_heads
_snake_case : Tuple = max_position_embeddings
_snake_case : Any = init_std # Normal(0, this parameter)
_snake_case : Union[str, Any] = activation_function
# parameters for xlmprophetnet
_snake_case : Optional[Any] = ngram
_snake_case : List[Any] = num_buckets
_snake_case : str = relative_max_distance
_snake_case : Dict = disable_ngram_loss
_snake_case : Dict = eps
# 3 Types of Dropout
_snake_case : List[str] = attention_dropout
_snake_case : Tuple = activation_dropout
_snake_case : List[Any] = dropout
_snake_case : Tuple = use_cache
super().__init__(
pad_token_id=__UpperCamelCase, bos_token_id=__UpperCamelCase, eos_token_id=__UpperCamelCase, is_encoder_decoder=__UpperCamelCase, add_cross_attention=__UpperCamelCase, decoder_start_token_id=__UpperCamelCase, **__UpperCamelCase, )
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 719 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
_snake_case : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
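# Builds a decodable (text, ids) pair: keep only single-byte tokens that
# decode to letters or spaces and round-trip through encode(), clamp the
# sequence length between min/max, then decode and re-encode the result.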
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_snake_case : str = list(filter(lambda a_ : re.match(r"""^[ a-zA-Z]+$""", t[1] ), a_ ) )
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
| 28 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Any ):
"""simple docstring"""
with open(snake_case__ ) as metadata_file:
_snake_case : List[Any] = json.load(snake_case__ )
_snake_case : Any = LukeConfig(use_entity_aware_attention=snake_case__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# Load the entity vocab file
_snake_case : int = load_entity_vocab(snake_case__ )
_snake_case : Union[str, Any] = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_snake_case : Optional[Any] = AddedToken("""<ent>""" , lstrip=snake_case__ , rstrip=snake_case__ )
_snake_case : int = AddedToken("""<ent2>""" , lstrip=snake_case__ , rstrip=snake_case__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
_snake_case : List[str] = LukeTokenizer.from_pretrained(snake_case__ )
# Initialize the embeddings of the special tokens
_snake_case : Union[str, Any] = state_dict["""embeddings.word_embeddings.weight"""]
_snake_case : Optional[int] = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
_snake_case : Tuple = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
_snake_case : Optional[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_snake_case : Optional[Any] = F"encoder.layer.{layer_index}.attention.self."
_snake_case : Dict = state_dict[prefix + matrix_name]
_snake_case : Any = state_dict[prefix + matrix_name]
_snake_case : List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_snake_case : int = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_snake_case : Optional[int] = entity_emb[entity_vocab["""[MASK]"""]]
_snake_case : Union[str, Any] = LukeModel(config=snake_case__ ).eval()
_snake_case , _snake_case : Any = model.load_state_dict(snake_case__ , strict=snake_case__ )
if not (len(snake_case__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"Missing keys {', '.join(snake_case__ )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
F" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
_snake_case : Tuple = LukeTokenizer.from_pretrained(snake_case__ , task="""entity_classification""" )
_snake_case : Union[str, Any] = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
_snake_case : Optional[int] = (39, 42)
_snake_case : Tuple = tokenizer(snake_case__ , entity_spans=[span] , add_prefix_space=snake_case__ , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# Verify word hidden states
if model_size == "large":
_snake_case : Optional[Any] = torch.Size((1, 42, 10_24) )
_snake_case : Any = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
_snake_case : List[str] = torch.Size((1, 42, 7_68) )
_snake_case : Optional[int] = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_snake_case : str = torch.Size((1, 1, 10_24) )
_snake_case : List[str] = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
_snake_case : Optional[int] = torch.Size((1, 1, 7_68) )
_snake_case : List[Any] = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(snake_case__ ) )
model.save_pretrained(snake_case__ )
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
_snake_case : Union[str, Any] = {}
with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(snake_case__ ):
_snake_case , _snake_case : int = line.rstrip().split("""\t""" )
_snake_case : str = index
return entity_vocab
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
A_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
| 28 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ = logging.get_logger(__name__)
A_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
A_ = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
A_ = {'''facebook/blenderbot-3B''': 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
_snake_case : Optional[Any] = bs[:]
_snake_case : int = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
_snake_case : List[Any] = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ (snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : Any = set()
_snake_case : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case : Optional[Any] = char
return pairs
class lowercase( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self: Tuple, a_: Union[str, Any], a_: Tuple, a_: Union[str, Any]="replace", a_: Optional[int]="<s>", a_: List[str]="</s>", a_: List[str]="</s>", a_: Union[str, Any]="<s>", a_: Tuple="<unk>", a_: List[Any]="<pad>", a_: Tuple="<mask>", a_: Optional[Any]=False, **a_: Optional[int], ):
'''simple docstring'''
_snake_case : Union[str, Any] = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else bos_token
_snake_case : Union[str, Any] = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else eos_token
_snake_case : List[Any] = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else sep_token
_snake_case : List[str] = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else cls_token
_snake_case : Tuple = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else unk_token
_snake_case : Dict = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case : Dict = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else mask_token
super().__init__(
errors=_lowercase, bos_token=_lowercase, eos_token=_lowercase, unk_token=_lowercase, sep_token=_lowercase, cls_token=_lowercase, pad_token=_lowercase, mask_token=_lowercase, add_prefix_space=_lowercase, **_lowercase, )
with open(_lowercase, encoding="""utf-8""" ) as vocab_handle:
_snake_case : str = json.load(_lowercase )
_snake_case : Optional[int] = {v: k for k, v in self.encoder.items()}
_snake_case : List[str] = errors # how to handle errors in decoding
_snake_case : Any = bytes_to_unicode()
_snake_case : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase, encoding="""utf-8""" ) as merges_handle:
_snake_case : Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1]
_snake_case : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case : int = dict(zip(_lowercase, range(len(_lowercase ) ) ) )
_snake_case : Any = {}
_snake_case : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case : List[str] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder )
def UpperCamelCase_ ( self: Dict, a_: Optional[Any] ):
'''simple docstring'''
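# Greedy BPE: repeatedly merge the adjacent pair with the lowest merge rank
# until no ranked pair remains; results are memoized in `self.cache`.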
if token in self.cache:
return self.cache[token]
_snake_case : str = tuple(_lowercase )
_snake_case : Optional[int] = get_pairs(_lowercase )
if not pairs:
return token
while True:
_snake_case : str = min(_lowercase, key=lambda a_ : self.bpe_ranks.get(_lowercase, float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case : Tuple = bigram
_snake_case : Any = []
_snake_case : List[str] = 0
while i < len(_lowercase ):
try:
_snake_case : Any = word.index(_lowercase, _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case : Optional[int] = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case : Optional[int] = tuple(_lowercase )
_snake_case : int = new_word
if len(_lowercase ) == 1:
break
else:
_snake_case : List[Any] = get_pairs(_lowercase )
_snake_case : Optional[int] = """ """.join(_lowercase )
_snake_case : List[str] = word
return word
def UpperCamelCase_ ( self: Union[str, Any], a_: int ):
'''simple docstring'''
_snake_case : Tuple = []
for token in re.findall(self.pat, _lowercase ):
_snake_case : Dict = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase_ ( self: Optional[int], a_: Tuple ):
'''simple docstring'''
return self.encoder.get(_lowercase, self.encoder.get(self.unk_token ) )
def UpperCamelCase_ ( self: List[str], a_: Optional[Any] ):
'''simple docstring'''
return self.decoder.get(_lowercase )
def UpperCamelCase_ ( self: str, a_: Optional[int] ):
'''simple docstring'''
_snake_case : Any = """""".join(_lowercase )
_snake_case : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""", errors=self.errors )
return text
def UpperCamelCase_ ( self: Tuple, a_: Optional[Any], a_: Optional[int] = None ):
'''simple docstring'''
if not os.path.isdir(_lowercase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_snake_case : int = os.path.join(
_lowercase, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Dict = os.path.join(
_lowercase, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowercase, """w""", encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowercase, ensure_ascii=_lowercase ) + """\n""" )
_snake_case : List[Any] = 0
with open(_lowercase, """w""", encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda a_ : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_snake_case : Dict = token_index
writer.write(""" """.join(_lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[int], a_: List[Any] = None, a_: Any = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase, token_ids_a=_lowercase, already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: str = None ):
'''simple docstring'''
_snake_case : List[str] = [self.sep_token_id]
_snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Tuple=False, **a_: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = kwargs.pop("""add_prefix_space""", self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
_snake_case : int = """ """ + text
return (text, kwargs)
def UpperCamelCase_ ( self: Union[str, Any], a_: Union[str, Any], a_: Any = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def UpperCamelCase_ ( self: str, a_: Optional[int] ):
'''simple docstring'''
_snake_case : str = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(_lowercase )
_snake_case : Tuple = """ """.join(_lowercase )
_snake_case : Optional[int] = self.encode(_lowercase )
if len(_lowercase ) > self.model_max_length:
_snake_case : str = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : int = vocab_size
_snake_case : int = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Tuple = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : Optional[Any] = rotary_value
_snake_case : List[str] = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
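# Declare the symbolic ONNX axes: the batch axis is always dynamic, and
# multiple-choice inputs get an extra dynamic "choice" axis.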
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 28 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : Union[str, Any] = SwinvaConfig()
_snake_case : Dict = swinva_name.split("""_""" )
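# timm checkpoint names look like "swinv2_<size>_patch4_window<w>_<img>"
# (optionally "window<a>to<b>" for fine-tuned window sizes); parse the model
# size, window size and image size out of the underscore-separated parts.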
_snake_case : Any = name_split[1]
if "to" in name_split[3]:
_snake_case : str = int(name_split[3][-3:] )
else:
_snake_case : Union[str, Any] = int(name_split[3] )
if "to" in name_split[2]:
_snake_case : Any = int(name_split[2][-2:] )
else:
_snake_case : List[str] = int(name_split[2][6:] )
if model_size == "tiny":
_snake_case : Tuple = 96
_snake_case : Any = (2, 2, 6, 2)
_snake_case : Tuple = (3, 6, 12, 24)
elif model_size == "small":
_snake_case : Tuple = 96
_snake_case : Optional[Any] = (2, 2, 18, 2)
_snake_case : str = (3, 6, 12, 24)
elif model_size == "base":
_snake_case : List[Any] = 1_28
_snake_case : int = (2, 2, 18, 2)
_snake_case : int = (4, 8, 16, 32)
else:
_snake_case : Optional[int] = 1_92
_snake_case : Any = (2, 2, 18, 2)
_snake_case : Dict = (6, 12, 24, 48)
if "to" in swinva_name:
_snake_case : Dict = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
_snake_case : Optional[int] = 2_18_41
_snake_case : Dict = """huggingface/label-files"""
_snake_case : Optional[Any] = """imagenet-22k-id2label.json"""
_snake_case : Tuple = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Optional[Any] = idalabel
_snake_case : str = {v: k for k, v in idalabel.items()}
else:
_snake_case : Optional[int] = 10_00
_snake_case : Any = """huggingface/label-files"""
_snake_case : Optional[int] = """imagenet-1k-id2label.json"""
_snake_case : Optional[int] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : str = idalabel
_snake_case : List[str] = {v: k for k, v in idalabel.items()}
_snake_case : List[Any] = img_size
_snake_case : List[str] = num_classes
_snake_case : List[Any] = embed_dim
_snake_case : Union[str, Any] = depths
_snake_case : List[Any] = num_heads
_snake_case : List[str] = window_size
return config
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
if "patch_embed.proj" in name:
_snake_case : Tuple = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
_snake_case : Optional[Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
_snake_case : List[Any] = """encoder.""" + name
if "attn.proj" in name:
_snake_case : int = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_snake_case : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_snake_case : Dict = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_snake_case : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_snake_case : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_snake_case : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
_snake_case : Optional[Any] = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
_snake_case : Optional[int] = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
_snake_case : Dict = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
_snake_case : List[str] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
_snake_case : Dict = """layernorm.weight"""
if name == "norm.bias":
_snake_case : Dict = """layernorm.bias"""
if "head" in name:
_snake_case : Optional[int] = name.replace("""head""" , """classifier""" )
else:
_snake_case : Dict = """swinv2.""" + name
return name
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_snake_case : Tuple = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
_snake_case : Union[str, Any] = key.split(""".""" )
_snake_case : int = int(key_split[1] )
_snake_case : List[str] = int(key_split[3] )
_snake_case : Tuple = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_snake_case : Any = val[:dim, :]
_snake_case : str = val[dim : dim * 2, :]
_snake_case : Optional[int] = val[-dim:, :]
else:
_snake_case : List[Any] = val[:dim]
_snake_case : Tuple = val[dim : dim * 2]
_snake_case : Union[str, Any] = val[-dim:]
else:
_snake_case : Optional[int] = val
return orig_state_dict
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Optional[int] = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
_snake_case : Any = get_swinva_config(snake_case__ )
_snake_case : str = SwinvaForImageClassification(snake_case__ )
model.eval()
_snake_case : List[str] = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
_snake_case : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Any = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
_snake_case : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
_snake_case : List[Any] = image_processor(images=snake_case__ , return_tensors="""pt""" )
_snake_case : List[Any] = timm_model(inputs["""pixel_values"""] )
_snake_case : List[Any] = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for old_item in old_list:
_snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
_snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
_snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
_snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
_snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
_snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ):
"""simple docstring"""
_snake_case : Dict = []
for old_item in old_list:
_snake_case : Dict = old_item
_snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
_snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case : Union[str, Any] = old_checkpoint[path]
_snake_case : Optional[int] = old_tensor.shape[0] // 3
_snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 )
_snake_case : Union[str, Any] = query.reshape(snake_case__ )
_snake_case : Tuple = key.reshape(snake_case__ )
_snake_case : Any = value.reshape(snake_case__ )
for path in paths:
_snake_case : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case : Optional[Any] = old_checkpoint[path["""old"""]]
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 28 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class lowercase( lowercase__ ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
class lowercase( lowercase__ , lowercase__ ):
'''simple docstring'''
lowercase__ = 1
@register_to_config
def __init__( self: Dict, a_: List[Any] = 2_000, a_: Any = 0.15, a_: Optional[int] = 0.01, a_: Tuple = 1_348.0, a_: Union[str, Any] = 1E-5, a_: Any = 1, ):
'''simple docstring'''
_snake_case : Optional[int] = sigma_max
# setable values
_snake_case : str = None
self.set_sigmas(a_, a_, a_, a_ )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: List[Any] = None ):
'''simple docstring'''
return sample
def UpperCamelCase_ ( self: Optional[int], a_: Dict, a_: str = None, a_: Optional[Any] = None ):
'''simple docstring'''
_snake_case : List[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_snake_case : List[Any] = torch.linspace(1, a_, a_, device=a_ )
def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: Optional[int] = None, a_: List[str] = None, a_: int = None ):
'''simple docstring'''
_snake_case : Optional[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
_snake_case : Optional[int] = sigma_max if sigma_max is not None else self.config.sigma_max
_snake_case : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(a_, a_ )
_snake_case : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_snake_case : str = torch.exp(torch.linspace(math.log(a_ ), math.log(a_ ), a_ ) )
_snake_case : List[Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCamelCase_ ( self: int, a_: Dict, a_: str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def UpperCamelCase_ ( self: Optional[Any], a_: Dict, a_: int, a_: List[Any], a_: str = None, a_: Optional[Any] = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
_snake_case : Dict = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_snake_case : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be on the same device as the sigmas they index, so move them (cpu is the default with cuda)
_snake_case : Any = timesteps.to(self.discrete_sigmas.device )
_snake_case : Union[str, Any] = self.discrete_sigmas[timesteps].to(sample.device )
_snake_case : Tuple = self.get_adjacent_sigma(a_, a_ ).to(sample.device )
_snake_case : Optional[Any] = torch.zeros_like(a_ )
_snake_case : Optional[int] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_snake_case : Optional[Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_snake_case : Dict = diffusion.unsqueeze(-1 )
_snake_case : int = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the SDE
_snake_case : str = randn_tensor(
sample.shape, layout=sample.layout, generator=a_, device=sample.device, dtype=sample.dtype )
_snake_case : Any = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_snake_case : List[Any] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=a_, prev_sample_mean=a_ )
def UpperCamelCase_ ( self: int, a_: str, a_: Optional[Any], a_: Any = None, a_: int = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
_snake_case : Dict = randn_tensor(sample.shape, layout=sample.layout, generator=a_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_snake_case : Dict = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
_snake_case : Any = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
_snake_case : Optional[int] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_snake_case : int = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_snake_case : List[Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_snake_case : Dict = step_size.unsqueeze(-1 )
_snake_case : Optional[int] = sample + step_size * model_output
_snake_case : str = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: Optional[Any], a_: Any, ):
'''simple docstring'''
_snake_case : Any = timesteps.to(original_samples.device )
_snake_case : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
_snake_case : Dict = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(a_ ) * sigmas[:, None, None, None]
)
_snake_case : Optional[Any] = noise + original_samples
return noisy_samples
def __len__( self: int ):
'''simple docstring'''
return self.config.num_train_timesteps
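# A minimal predictor-corrector sampling sketch for this scheduler (hedged: the
# method names follow the upstream diffusers ScoreSdeVeScheduler API, since they are
# obfuscated above, and `model`, `shape`, `num_inference_steps` are placeholders):
#
#     scheduler.set_timesteps(num_inference_steps)
#     scheduler.set_sigmas(num_inference_steps)
#     sample = torch.randn(shape) * scheduler.config.sigma_max
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             model_output = model(sample, t).sample
#             sample = scheduler.step_correct(model_output, sample).prev_sample
#         model_output = model(sample, t).sample
#         output = scheduler.step_pred(model_output, t, sample)
#         sample, sample_mean = output.prev_sample, output.prev_sample_mean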
| 701 |
"""simple docstring"""
from typing import Any
def UpperCAmelCase__ (input_list : list ):
    """simple docstring"""
    if not input_list:
        return []
    result : list = [input_list.count(value ) for value in input_list] # count how often each element occurs
    y : int = max(result ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
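# Quick usage sketch for the function above (illustrative values only):
#     UpperCAmelCase__([2, 2, 3])        # -> [2]
#     UpperCAmelCase__([1, 2, 2, 3, 3])  # -> [2, 3]  (ties come back sorted)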
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
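# Net effect of the lazy structure above (a sketch of the intent): importing the
# package itself is cheap, and a line such as
#     from transformers.models.mvp import MvpConfig
# only triggers the real `configuration_mvp` import on first attribute access, so
# torch / tokenizers are not pulled in until a symbol that needs them is requested.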
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_vision_model"
def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = patch_size
_snake_case : Dict = image_size
_snake_case : Optional[Any] = initializer_factor
_snake_case : Any = layer_norm_eps
_snake_case : int = stop_gradient
_snake_case : Any = share_layernorm
_snake_case : List[Any] = remove_last_layer
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: Union[str, os.PathLike], **a_: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_text_model"
def __init__( self: str, a_: Dict=50_265, a_: List[Any]=768, a_: Union[str, Any]=12, a_: List[str]=12, a_: str=1, a_: Optional[Any]=3_072, a_: int="gelu", a_: int=0.1, a_: int=0.1, a_: Optional[int]=514, a_: Tuple=1, a_: Tuple=1E-05, a_: Optional[int]=1, a_: Union[str, Any]=0, a_: str=2, a_: Any="absolute", a_: List[Any]=True, **a_: Union[str, Any], ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : List[Any] = initializer_factor
_snake_case : Optional[int] = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[str] = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[Any] = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Dict = use_cache
_snake_case : int = pad_token_id
_snake_case : Union[str, Any] = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
@classmethod
def UpperCamelCase_ ( cls: str, a_: Union[str, os.PathLike], **a_: int ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a_, **a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower"
def __init__( self: int, a_: List[str]=True, a_: Any="gelu", a_: List[Any]=768, a_: int=1, a_: Optional[int]=1E-05, a_: Tuple=False, a_: Optional[Any]="add", a_: List[str]=12, a_: Union[str, Any]=6, a_: int=False, a_: Any=False, a_: Dict=None, a_: Any=None, **a_: str, ):
'''simple docstring'''
_snake_case : str = kwargs.pop("""text_config_dict""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", a_ )
super().__init__(**a_ )
_snake_case : str = share_cross_modal_transformer_layers
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = hidden_size
_snake_case : Union[str, Any] = initializer_factor
_snake_case : Dict = layer_norm_eps
_snake_case : Dict = share_link_tower_layers
_snake_case : Optional[int] = link_tower_type
_snake_case : Any = num_attention_heads
_snake_case : int = num_hidden_layers
_snake_case : int = tie_word_embeddings
_snake_case : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
_snake_case : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_snake_case : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_snake_case : Any = BridgeTowerTextConfig(**a_ )
_snake_case : List[Any] = BridgeTowerVisionConfig(**a_ )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : str = self.text_config.to_dict()
_snake_case : List[str] = self.vision_config.to_dict()
_snake_case : Tuple = self.__class__.model_type
return output
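# Minimal construction sketch (hedged: uses the upstream transformers class and
# method names; everything is built from default values, no pretrained weights):
#     text_cfg = BridgeTowerTextConfig()
#     vision_cfg = BridgeTowerVisionConfig()
#     config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     config.to_dict()  # nests the sub-configs under "text_config" / "vision_config"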
| 28 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"""vocab_file""": """spiece.model"""}
A_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
A_ = {
"""albert-base-v1""": 5_12,
"""albert-large-v1""": 5_12,
"""albert-xlarge-v1""": 5_12,
"""albert-xxlarge-v1""": 5_12,
"""albert-base-v2""": 5_12,
"""albert-large-v2""": 5_12,
"""albert-xlarge-v2""": 5_12,
"""albert-xxlarge-v2""": 5_12,
}
A_ = """▁"""
class lowercase( UpperCamelCase_ ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: str, a_: List[Any], a_: Optional[Any]=True, a_: Optional[Any]=True, a_: int=False, a_: Any="[CLS]", a_: Tuple="[SEP]", a_: List[str]="<unk>", a_: List[Any]="[SEP]", a_: str="<pad>", a_: Dict="[CLS]", a_: Dict="[MASK]", a_: Optional[Dict[str, Any]] = None, **a_: Tuple, ):
'''simple docstring'''
_snake_case : int = (
AddedToken(__A, lstrip=__A, rstrip=__A, normalized=__A )
if isinstance(__A, __A )
else mask_token
)
_snake_case : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__A, remove_space=__A, keep_accents=__A, bos_token=__A, eos_token=__A, unk_token=__A, sep_token=__A, pad_token=__A, cls_token=__A, mask_token=__A, sp_model_kwargs=self.sp_model_kwargs, **__A, )
_snake_case : Optional[Any] = do_lower_case
_snake_case : List[Any] = remove_space
_snake_case : Dict = keep_accents
_snake_case : Tuple = vocab_file
_snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
@property
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
return len(self.sp_model )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.__dict__.copy()
_snake_case : List[Any] = None
return state
def __setstate__( self: Optional[int], a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = d
# for backward compatibility
if not hasattr(self, """sp_model_kwargs""" ):
_snake_case : List[Any] = {}
_snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self: Optional[Any], a_: List[Any] ):
'''simple docstring'''
if self.remove_space:
_snake_case : Optional[Any] = " ".join(inputs.strip().split() )
else:
_snake_case : str = inputs
_snake_case : str = outputs.replace("""``""", """\"""" ).replace("""''""", """\"""" )
if not self.keep_accents:
_snake_case : Union[str, Any] = unicodedata.normalize("""NFKD""", __A )
_snake_case : int = "".join([c for c in outputs if not unicodedata.combining(__A )] )
if self.do_lower_case:
_snake_case : Tuple = outputs.lower()
return outputs
def UpperCamelCase_ ( self: int, a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.preprocess_text(__A )
_snake_case : Dict = self.sp_model.encode(__A, out_type=__A )
_snake_case : str = []
for piece in pieces:
if len(__A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_snake_case : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__A, """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_snake_case : Optional[int] = cur_pieces[1:]
else:
_snake_case : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__A )
else:
new_pieces.append(__A )
return new_pieces
def UpperCamelCase_ ( self: Tuple, a_: str ):
'''simple docstring'''
return self.sp_model.PieceToId(__A )
def UpperCamelCase_ ( self: Tuple, a_: Optional[int] ):
'''simple docstring'''
return self.sp_model.IdToPiece(__A )
def UpperCamelCase_ ( self: Tuple, a_: List[Any] ):
'''simple docstring'''
_snake_case : str = []
_snake_case : str = ""
_snake_case : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_snake_case : int = True
_snake_case : int = []
else:
current_sub_tokens.append(__A )
_snake_case : Optional[Any] = False
out_string += self.sp_model.decode(__A )
return out_string.strip()
def UpperCamelCase_ ( self: Optional[int], a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self: Tuple, a_: List[int], a_: Optional[List[int]] = None, a_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A, token_ids_a=__A, already_has_special_tokens=__A )
if token_ids_a is not None:
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1]
def UpperCamelCase_ ( self: str, a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Any = [self.sep_token_id]
_snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self: Union[str, Any], a_: str, a_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__A ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_snake_case : List[Any] = os.path.join(
__A, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A, """wb""" ) as fi:
_snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
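# Usage sketch (hedged: assumes the upstream class name AlbertTokenizer and the
# "albert-base-v2" hub checkpoint):
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     enc = tokenizer("Hello world")  # lowercased, accents stripped, SentencePiece pieces
#     enc.input_ids                   # [CLS] + pieces + [SEP], as built by the method above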
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" )
return image
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
_snake_case : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Optional[int] = val
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
_snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
_snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) )
_snake_case : Dict = qkv_bias
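# Layout note for the fused bias built above: BLIP-2's vision encoder has no key
# bias, so the concatenation is [q_bias; zeros_like(q_bias); v_bias] -- a vector of
# length 3 * dim that lines up with the fused qkv projection weight.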
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24
_snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "opt-6.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "t5-xl" in model_name:
_snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ )
return config, image_size
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
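# Example invocation (a sketch; the script filename and output directory are
# placeholders):
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b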
| 28 | 0 |
A_ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm (equation : str ) -> int:
    """simple docstring"""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack : Stack[int] = Stack()
    operator_stack : Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i )
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5: the value left on the operand stack is the final result
    return operand_stack.peek()
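# Worked trace for "(2 + 3)": '2' and '3' land on the operand stack and '+' on the
# operator stack; the ')' pops '+', then 3 (num_a) and 2 (num_b), pushes
# operators['+'](2, 3) == 5, and the final peek returns 5.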
if __name__ == "__main__":
A_ = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
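# Behaviour sketch for flatten_yaml_as_dict: a nested mapping such as
# {"model": {"classification": {"name": "mobilevit_v2"}}} flattens to
# {"model.classification.name": "mobilevit_v2"}, which is why the code below reads
# the config back with getattr(config, "model.classification.name", ...).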
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case__ )
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    """simple docstring"""
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove unused keys and rename the remaining keys of the original state dict
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.id2label[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected logits for the base variant
            expected_logits = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
            '''Name of the task on which the MobileViTV2 model you\'d like to convert was trained. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
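# Example invocation (a sketch; the file names below are hypothetical placeholders):
#
#   python convert_mobilevitv2_checkpoint.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256
#
# The dump folder can then be reloaded with
# MobileViTVaForImageClassification.from_pretrained("./mobilevitv2-1.0-imagenet1k-256").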
| 28 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''squeezebert/squeezebert-uncased''': 5_12,
'''squeezebert/squeezebert-mnli''': 5_12,
'''squeezebert/squeezebert-mnli-headless''': 5_12,
}
A_ = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""", do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
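# Usage sketch (assumes the Hub checkpoint listed above is reachable):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tokenizer("first segment", "second segment")
#   enc["token_type_ids"]  # 0s for [CLS] + first segment + [SEP], 1s for second segment + [SEP]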
| 705 |
"""simple docstring"""
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def test_find_backend( self ):
        '''simple docstring'''
        simple_backend = find_backend(""" if not is_torch_available():""" )
        self.assertEqual(simple_backend, """torch""" )
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
        self.assertEqual(double_backend, """torch_and_transformers""" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
        self.assertEqual(triple_backend, """torch_and_transformers_and_onnx""" )
    def test_read_init( self ):
        '''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""", objects )
        self.assertIn("""torch_and_transformers""", objects )
        self.assertIn("""flax_and_transformers""", objects )
        self.assertIn("""torch_and_transformers_and_onnx""", objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
    def test_create_dummy_object( self ):
        '''simple docstring'''
        dummy_constant = create_dummy_object("""CONSTANT""", """'torch'""" )
        self.assertEqual(dummy_constant, """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""", """'torch'""" )
        self.assertEqual(
            dummy_function, """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""", """'torch'""" )
        self.assertEqual(dummy_class, expected_dummy_class )
    def test_create_dummy_files( self ):
        '''simple docstring'''
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""], expected_dummy_pytorch_file )
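# Illustrative sketch of `create_dummy_object`, pieced together from the expected strings
# asserted above (an assumption -- the real implementation lives in utils/check_dummies.py):
def create_dummy_object_sketch(name: str, backend_name: str) -> str:
    if name.isupper():
        # constants become `NAME = None`
        return f"\n{name} = None\n"
    if name.islower():
        # functions become stubs that raise via requires_backends
        return f"\ndef {name}(*args, **kwargs):\n    requires_backends({name}, {backend_name})\n"
    # classes become DummyObject subclasses guarding __init__/from_config/from_pretrained
    ...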
| 28 | 0 |
"""simple docstring"""
def stooge_sort(arr: list ):
    """simple docstring"""
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge(arr: list , i: int , h: int ):
    """simple docstring"""
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3 )
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , (h - t) )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , (h) )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , (h - t) )
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(stooge_sort(unsorted))
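# Example (stooge sort sorts in place and returns the list; it runs in roughly O(n**2.71)):
# >>> stooge_sort([8, 3, 5, 1])
# [1, 3, 5, 8]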
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_owlvit"""] = ["""OwlViTFeatureExtractor"""]
    _import_structure["""image_processing_owlvit"""] = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_owlvit"""] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
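# `_LazyModule` defers the heavy torch/vision imports until an attribute is first
# accessed. A minimal sketch of the idea (illustrative only, not the transformers
# implementation):
#
#   import importlib, types
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#       def __getattr__(self, attr):
#           for submodule, objects in self._import_structure.items():
#               if attr in objects:
#                   module = importlib.import_module("." + submodule, self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")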
| 28 | 0 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
A_ = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
A_ = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
A_ = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage="""https://github.com/google-research/bleurt""", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""string""", id="""sequence""" ),
"""references""": datasets.Value("""string""", id="""sequence""" ),
} ), codebase_urls=["""https://github.com/google-research/bleurt"""], reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""], )
    def _download_and_prepare( self, dl_manager ):
        '''simple docstring'''
        # check that the config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            checkpoint_name = """bleurt-base-128"""
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name ) )
    def _compute( self, predictions, references ):
        '''simple docstring'''
        scores = self.scorer.score(references=references, candidates=predictions )
        return {"scores": scores}
| 707 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A_ = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool , use_xla: bool ):
    """simple docstring"""
    def run_func(func: Callable ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids(batch_size: int , sequence_length: int , vocab_size: int ):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
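# Sketch of how the decorator factory above is applied (mirrors the prepare-inference
# helpers below; `model` and `input_ids` stand in for the objects built there):
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
#
# With eager mode disabled, the call is compiled through tf.function(experimental_compile=use_xla).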
class lowercase( Benchmark ):
    '''simple docstring'''
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
    def framework_version( self ):
'''simple docstring'''
return tf.__version__
def UpperCamelCase_ ( self: List[str], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self: int, a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[Any] = self._prepare_train_func(a_, a_, a_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : str = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : List[str] = self._prepare_inference_func(a_, a_, a_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: int, a_: int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], a_ )
_snake_case : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_snake_case : Optional[int] = self._prepare_train_func(a_, a_, a_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : List[Any] = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : str = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : List[Any] = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Dict = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Any = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : List[str] = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(a_, decoder_input_ids=a_, training=a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(a_, training=a_ )
_snake_case : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_snake_case : Tuple = (
hasattr(a_, """architectures""" )
and isinstance(config.architectures, a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_snake_case : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_snake_case : str = __import__("""transformers""", fromlist=[model_class] )
_snake_case : Tuple = getattr(a_, a_ )
_snake_case : Any = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_snake_case : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_snake_case : List[Any] = config.vocab_size if hasattr(a_, """vocab_size""" ) else config.encoder.vocab_size
_snake_case : int = random_input_ids(a_, a_, a_ )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
_snake_case : Dict = model(a_, decoder_input_ids=a_, labels=a_, training=a_ )[0]
_snake_case : str = tf.gradients(a_, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
_snake_case : Optional[Any] = model(a_, labels=a_, training=a_ )[0]
_snake_case : Optional[Any] = tf.gradients(a_, model.trainable_variables )
return gradients
_snake_case : int = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(a_, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_snake_case : Dict = timeit.repeat(
a_, repeat=self.args.repeat, number=10, )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def UpperCamelCase_ ( self: Optional[Any], a_: Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_snake_case : List[Any] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_snake_case : Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_snake_case : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(a_ )
_snake_case : List[str] = meminfo.used
_snake_case : Any = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_snake_case : List[Any] = None
else:
_snake_case : int = measure_peak_memory_cpu(a_ )
_snake_case : List[str] = Memory(a_ ) if isinstance(a_, a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_snake_case : Tuple = stop_memory_tracing(a_ )
if memory is None:
_snake_case : int = summary.total
else:
_snake_case : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
'''simple docstring'''
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        input_ids = tf.concat([input_ids, eos_tensor], axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self, config, inputs_dict ):
        '''simple docstring'''
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )
        # append to next input_ids and attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1 )
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1E-3 )
def prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFBlenderbotForConditionalGeneration,
            """feature-extraction""": TFBlenderbotModel,
            """summarization""": TFBlenderbotForConditionalGeneration,
            """text2text-generation""": TFBlenderbotForConditionalGeneration,
            """translation""": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class lowercase( unittest.TestCase ):
'''simple docstring'''
lowercase__ = ["""My friends are cool but they eat too many carbs."""]
lowercase__ = """facebook/blenderbot-400M-distill"""
    @cached_property
    def tokenizer( self ):
        '''simple docstring'''
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        '''simple docstring'''
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        model_inputs = self.tokenizer(self.src_text, return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 708 |
"""simple docstring"""
def find_minimum_change(denominations: list[int] , value: str ):
    """simple docstring"""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations ):
        # Take as many of this denomination as possible
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=''' ''')
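# Worked example (greedy, largest denomination first):
# find_minimum_change([1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00], "987")
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]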
| 28 | 0 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}
def get_week_day(year: int , month: int , day: int ):
    """simple docstring"""
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
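# Example, verifiable against a calendar:
# >>> get_week_day(2020, 10, 24)
# 'Saturday'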
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
'''simple docstring'''
def __init__( self: Optional[Any], a_: Union[str, Any], a_: int=100, a_: int=13, a_: List[Any]=30, a_: str=2, a_: Optional[Any]=3, a_: Optional[int]=True, a_: Any=True, a_: Optional[Any]=32, a_: Tuple=4, a_: str=4, a_: List[Any]=37, a_: List[str]="gelu", a_: str=0.1, a_: Optional[int]=0.1, a_: Any=10, a_: List[str]=0.02, a_: Dict=3, a_: str=None, a_: Optional[int]=[0, 1, 2, 3], ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = 100
_snake_case : Any = batch_size
_snake_case : List[Any] = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : str = num_channels
_snake_case : Tuple = is_training
_snake_case : Tuple = use_labels
_snake_case : Any = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : List[str] = scope
_snake_case : int = out_indices
_snake_case : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Dict = (image_size // patch_size) ** 2
_snake_case : str = num_patches + 1
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
_snake_case : Tuple = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Any, a_: Optional[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self: str, a_: List[Any], a_: Optional[Any], a_: Optional[int], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self: Any, a_: List[str], a_: Any, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.type_sequence_label_size
_snake_case : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Any = 1
_snake_case : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Optional[Any] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self: List[Any], a_: Optional[int], a_: List[Any], a_: str, a_: int ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_snake_case : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case : str = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = BeitModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
_snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(a_ )
_snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_snake_case : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Dict = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : List[Any] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case : Dict = False
_snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_snake_case : Any = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : int = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(a_ )
_snake_case : Dict = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : List[str] = image_processor(images=a_, return_tensors="""pt""" ).pixel_values.to(a_ )
# prepare bool_masked_pos
_snake_case : Optional[int] = torch.ones((1, 196), dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(pixel_values=a_, bool_masked_pos=a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], a_, atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(a_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Any = prepare_img()
_snake_case : Any = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : int = model(**a_ )
_snake_case : Optional[int] = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, a_ )
_snake_case : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : str = 281
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
a_ )
_snake_case : int = self.default_image_processor
_snake_case : Optional[Any] = prepare_img()
_snake_case : Union[str, Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**a_ )
_snake_case : Dict = outputs.logits
# verify the logits
_snake_case : Tuple = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3], a_, atol=1E-4 ) )
_snake_case : List[str] = 2_396
self.assertEqual(logits.argmax(-1 ).item(), a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : int = model.to(a_ )
_snake_case : List[str] = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : Union[str, Any] = Image.open(ds[0]["""file"""] )
_snake_case : List[Any] = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits
# verify the logits
_snake_case : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, a_ )
_snake_case : Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
_snake_case : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
], device=a_, )
else:
_snake_case : Optional[Any] = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
], device=a_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], a_, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_snake_case : List[Any] = model.to(a_ )
_snake_case : Tuple = BeitImageProcessor(do_resize=a_, size=640, do_center_crop=a_ )
_snake_case : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
_snake_case : str = Image.open(ds[0]["""file"""] )
_snake_case : Tuple = image_processor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
_snake_case : Union[str, Any] = outputs.logits.detach().cpu()
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_, target_sizes=[(500, 300)] )
_snake_case : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, a_ )
_snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
_snake_case : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, a_ )
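# Usage sketch mirroring the segmentation test above (checkpoint name taken from the test):
#
#   processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   seg = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
#   # `seg` is a (height, width) tensor of ADE20K class indices.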
| 28 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+\"\S*\":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+\"([^\"]+)\",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend(line ):
    """simple docstring"""
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check all submodules are properly registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
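# For context, a minimal sketch (illustrative, not taken from an actual init)
# of the lazy `__init__.py` layout these checks validate: the
# `_import_structure` dict and the `TYPE_CHECKING` branch must declare exactly
# the same objects, backend by backend.
#
#   _import_structure = {"configuration_foo": ["FooConfig"]}
#   try:
#       if not is_torch_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       pass
#   else:
#       _import_structure["modeling_foo"] = ["FooModel"]
#
#   if TYPE_CHECKING:
#       from .configuration_foo import FooConfig
#       try:
#           if not is_torch_available():
#               raise OptionalDependencyNotAvailable()
#       except OptionalDependencyNotAvailable:
#           pass
#       else:
#           from .modeling_foo import FooModel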
| 710 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1_000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2_540_529) < 10
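# Hedged sketch of the sampling loop the tests above exercise (`unet` is a
# stand-in for a trained denoising model; shapes are up to the caller). IPNDM
# is a multistep method: the scheduler keeps a history of past residuals
# (`scheduler.ets`) between `step` calls, which is why the tests copy dummy
# residuals over before stepping.
def _example_ipndm_sampling(unet, sample, num_inference_steps=10):
    scheduler = IPNDMScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = unet(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample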
| 28 | 0 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Read the Visual Genome class and attribute label files into two lists."""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    """Load a pickled checkpoint and convert all numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class lowercase:
'''simple docstring'''
lowercase__ = {}
def __init__( self: List[Any], a_: dict, a_: str = "root", a_: Dict=0 ):
'''simple docstring'''
_snake_case : Dict = name
_snake_case : Tuple = level
_snake_case : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_snake_case : Optional[Any] = copy.deepcopy(UpperCamelCase__ )
_snake_case : Optional[int] = copy.deepcopy(UpperCamelCase__ )
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
_snake_case : Optional[Any] = Config(UpperCamelCase__, name=UpperCamelCase__, level=level + 1 )
_snake_case : Optional[int] = v
setattr(self, UpperCamelCase__, UpperCamelCase__ )
_snake_case : Tuple = d
def __repr__( self: List[Any] ):
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self: int, a_: Union[str, Any], a_: Dict ):
'''simple docstring'''
_snake_case : Optional[int] = val
_snake_case : Optional[Any] = val
_snake_case : Optional[int] = key.split(""".""" )
_snake_case : List[str] = len(UpperCamelCase__ ) - 1
_snake_case : List[str] = self._pointer
if len(UpperCamelCase__ ) > 1:
for i, l in enumerate(UpperCamelCase__ ):
if hasattr(self, UpperCamelCase__ ) and isinstance(getattr(self, UpperCamelCase__ ), UpperCamelCase__ ):
setattr(getattr(self, UpperCamelCase__ ), """.""".join(levels[i:] ), UpperCamelCase__ )
if l == last_level:
_snake_case : Tuple = val
else:
_snake_case : Any = pointer[l]
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return self._pointer
def UpperCamelCase_ ( self: str, a_: Optional[int], a_: Optional[Any] ):
'''simple docstring'''
with open(f"{file_name}", """w""" ) as stream:
dump(UpperCamelCase__, UpperCamelCase__ )
def UpperCamelCase_ ( self: Any, a_: List[Any], a_: List[str] ):
'''simple docstring'''
with open(f"{file_name}", """w""" ) as stream:
json.dump(UpperCamelCase__, UpperCamelCase__ )
@staticmethod
def UpperCamelCase_ ( a_: Dict ):
'''simple docstring'''
with open(UpperCamelCase__ ) as stream:
_snake_case : Tuple = load(UpperCamelCase__, Loader=UpperCamelCase__ )
return data
def __str__( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = ''' '''
if self._name != "root":
_snake_case : Optional[int] = f"{t * (self._level-1)}{self._name}:\n"
else:
_snake_case : int = ''''''
_snake_case : Optional[int] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCamelCase__ ).__name__})\n"
_snake_case : Optional[Any] = level
return r[:-1]
@classmethod
def UpperCamelCase_ ( cls: Dict, a_: str, **a_: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = cls.get_config_dict(UpperCamelCase__, **UpperCamelCase__ )
return cls(UpperCamelCase__ )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], a_: str, **a_: str ):
'''simple docstring'''
_snake_case : Dict = kwargs.pop("""cache_dir""", UpperCamelCase__ )
_snake_case : str = kwargs.pop("""force_download""", UpperCamelCase__ )
_snake_case : Tuple = kwargs.pop("""resume_download""", UpperCamelCase__ )
_snake_case : List[str] = kwargs.pop("""proxies""", UpperCamelCase__ )
_snake_case : Tuple = kwargs.pop("""local_files_only""", UpperCamelCase__ )
if os.path.isdir(UpperCamelCase__ ):
_snake_case : Optional[Any] = os.path.join(UpperCamelCase__, UpperCamelCase__ )
elif os.path.isfile(UpperCamelCase__ ) or is_remote_url(UpperCamelCase__ ):
_snake_case : Any = pretrained_model_name_or_path
else:
_snake_case : Optional[Any] = hf_bucket_url(UpperCamelCase__, filename=UpperCamelCase__, use_cdn=UpperCamelCase__ )
try:
# Load from URL or cache if already cached
_snake_case : Optional[Any] = cached_path(
UpperCamelCase__, cache_dir=UpperCamelCase__, force_download=UpperCamelCase__, proxies=UpperCamelCase__, resume_download=UpperCamelCase__, local_files_only=UpperCamelCase__, )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_snake_case : List[Any] = Config.load_yaml(UpperCamelCase__ )
except EnvironmentError:
_snake_case : Dict = '''Can\'t load config for'''
raise EnvironmentError(UpperCamelCase__ )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(UpperCamelCase__ ), kwargs
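# Hedged usage sketch for the Config wrapper above (keys are illustrative, and
# this describes the intended behavior of the class): nested dicts become
# attribute-accessible sub-configs, and dotted keys walk the hierarchy on
# assignment.
#
#   config = Config({"model": {"hidden_size": 768}, "name": "frcnn"})
#   config.model.hidden_size   # -> 768
#   config.name                # -> "frcnn"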
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")  # NOTE: kept from the original demo; it raises even on success
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Optional[int]=None , snake_case__ : List[str]=False , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=10 , snake_case__ : List[str]=False , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=False , ):
"""simple docstring"""
if cache_dir is None:
_snake_case : List[Any] = TRANSFORMERS_CACHE
if isinstance(_lowercase , _lowercase ):
_snake_case : Optional[Any] = str(_lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
_snake_case : str = None
if not local_files_only:
try:
_snake_case : int = requests.head(_lowercase , allow_redirects=_lowercase , proxies=_lowercase , timeout=_lowercase )
if response.status_code == 2_00:
_snake_case : List[Any] = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_snake_case : Optional[int] = url_to_filename(_lowercase , _lowercase )
# get cache path to put the file
_snake_case : Union[str, Any] = os.path.join(_lowercase , _lowercase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_lowercase ):
return cache_path
else:
_snake_case : Any = [
file
for file in fnmatch.filter(os.listdir(_lowercase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(_lowercase ) > 0:
return os.path.join(_lowercase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set \'local_files_only\'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(_lowercase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_snake_case : List[str] = cache_path + '''.lock'''
with FileLock(_lowercase ):
# If the download just completed while the lock was activated.
if os.path.exists(_lowercase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_snake_case : int = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(_lowercase , """a+b""" ) as f:
yield f
_snake_case : List[Any] = _resumable_file_manager
if os.path.exists(_lowercase ):
_snake_case : List[Any] = os.stat(_lowercase ).st_size
else:
_snake_case : Dict = 0
else:
_snake_case : Any = partial(tempfile.NamedTemporaryFile , dir=_lowercase , delete=_lowercase )
_snake_case : List[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , _lowercase , temp_file.name , )
http_get(
_lowercase , _lowercase , proxies=_lowercase , resume_size=_lowercase , user_agent=_lowercase , )
os.replace(temp_file.name , _lowercase )
_snake_case : Optional[int] = {'''url''': url, '''etag''': etag}
_snake_case : int = cache_path + '''.json'''
with open(_lowercase , """w""" ) as meta_file:
json.dump(_lowercase , _lowercase )
return cache_path
def url_to_filename(url, etag=None):
    """Convert `url` into a hashed filename in a repeatable way; append the etag hash if given."""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple=None , snake_case__ : str=False , snake_case__ : List[str]=None , snake_case__ : Tuple=False , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=False , snake_case__ : int=False , snake_case__ : List[str]=False , ):
"""simple docstring"""
if cache_dir is None:
_snake_case : List[Any] = TRANSFORMERS_CACHE
if isinstance(_lowercase , _lowercase ):
_snake_case : int = str(_lowercase )
if isinstance(_lowercase , _lowercase ):
_snake_case : int = str(_lowercase )
if is_remote_url(_lowercase ):
# URL, so get it from the cache (downloading if necessary)
_snake_case : List[Any] = get_from_cache(
_lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , user_agent=_lowercase , local_files_only=_lowercase , )
elif os.path.exists(_lowercase ):
# File, and it exists.
_snake_case : Optional[Any] = url_or_filename
elif urlparse(_lowercase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(_lowercase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(_lowercase ) )
if extract_compressed_file:
if not is_zipfile(_lowercase ) and not tarfile.is_tarfile(_lowercase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_snake_case : str = os.path.split(_lowercase )
_snake_case : Any = output_file.replace(""".""" , """-""" ) + '''-extracted'''
_snake_case : List[str] = os.path.join(_lowercase , _lowercase )
if os.path.isdir(_lowercase ) and os.listdir(_lowercase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_snake_case : int = output_path + '''.lock'''
with FileLock(_lowercase ):
shutil.rmtree(_lowercase , ignore_errors=_lowercase )
os.makedirs(_lowercase )
if is_zipfile(_lowercase ):
with ZipFile(_lowercase , """r""" ) as zip_file:
zip_file.extractall(_lowercase )
zip_file.close()
elif tarfile.is_tarfile(_lowercase ):
_snake_case : Dict = tarfile.open(_lowercase )
tar_file.extractall(_lowercase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(_lowercase ) )
return output_path_extracted
return output_path
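# Hedged sketch of how `cached_path` is typically driven (the URL is an
# assumed placeholder): remote files go through the ETag-keyed download cache,
# with a `.lock` file serializing concurrent downloads, while local archives
# can be unpacked next to themselves.
#
#   local_file = cached_path("https://cdn.huggingface.co/some-model.bin")
#   unpacked_dir = cached_path("weights.tar.gz", extract_compressed_file=True)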
def get_data(query, delim=","):
    """Fetch data from a local file or a URL and best-effort decode it."""
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            # NOTE: eval on file/remote content is unsafe; kept to match the original demo behavior
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    """Download an image from `url` and return it as a numpy array."""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        v = torch.from_numpy(v)
        if "running_var" in k:
            # BatchNorm checkpoints need a num_batches_tracked entry alongside running_var
            zero = torch.tensor([0])
            new[k.replace("running_var", "num_batches_tracked")] = zero
        new[k] = v
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    """Load an image from a path or URL into a numpy array in the requested channel order."""
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    """Yield successive `batch`-sized slices from `images`."""
    return (images[i : i + batch] for i in range(0, len(images), batch))
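# Illustration of the `chunk` generator above:
#
#   list(chunk(["a", "b", "c"], batch=2))  # -> [["a", "b"], ["c"]]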
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composite numbers which do not follow the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
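# Worked check (Project Euler #46): Goldbach's other conjecture claims every
# odd composite equals a prime plus twice a square, e.g. 9 = 7 + 2*1^2 and
# 15 = 13 + 2*1^2. `compute_nums(1)` returns the first odd composite with no
# such decomposition; the published answer to the problem is 5777.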
| 28 | 0 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Create train/validation/test `DataLoader`s for one fold of the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader, test_dataloader
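# Hedged sketch of the fold mechanics used below: StratifiedKFold yields
# (train_idxs, valid_idxs) index arrays per fold while preserving the label
# balance, and each pair drives one `get_fold_dataloaders` call.
def _example_stratified_folds():
    labels = np.array([0, 0, 0, 1, 1, 1])
    for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(np.zeros(len(labels)), labels):
        print(train_idxs, valid_idxs)  # each validation fold keeps one sample of each class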
def training_function(config, args):
    """Run K-fold training on GLUE MRPC and report folded test metrics."""
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 712 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    This wraps the CLIP processor so the image preprocessing step stays differentiable: the stock processor converts
    to PIL images, which breaks gradient flow, so images are kept as torch tensors here.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
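# Hedged usage sketch for ProcessorGradientFlow: because preprocessing stays
# in torch ops (resize -> center-crop -> normalize), gradients can flow from a
# CLIP loss back into the image tensor being optimized.
#
#   processor = ProcessorGradientFlow(device="cpu")
#   img = torch.rand(1, 3, 256, 256, requires_grad=True)
#   encoding = processor(text=["a face"], images=img, return_tensors="pt", padding=True)
#   encoding["pixel_values"].requires_grad  # -> True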
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
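# Note on the prompt syntax parsed by `process_prompts` above: prompts may be
# a single "|"-separated string where each entry is either bare text (weight
# 1.0) or "text:weight", e.g. "a smiling face:1.0|blue eyes:0.5|beard:-0.5".
# A negative weight flips the sign of that prompt's similarity contribution,
# effectively steering the optimization away from the phrase.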
| 28 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
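# Quick illustration of `rename_key` on typical SimMIM checkpoint keys
# (example keys are assumed):
#
#   rename_key("encoder.patch_embed.proj.weight")
#     -> "swin.embeddings.patch_embeddings.projection.weight"
#   rename_key("encoder.layers.0.blocks.0.attn.proj.weight")
#     -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"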
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                # the fused qkv projection is split into separate query/key/value weights
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
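# Editor's usage note (the script file name below is an assumption; the flags
# are the ones defined above):
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path /path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim-converted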
| 713 |
"""simple docstring"""
def count_set_bits(number: int) -> int:
    """
    Count the set bits (1s) in the binary representation of a non-negative
    integer using Brian Kernighan's algorithm.

    >>> count_set_bits(25)
    3
    >>> count_set_bits(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # number & (number - 1) clears the lowest set bit, so the loop runs
        # once per set bit rather than once per bit position.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
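    # Editor's cross-check (illustrative, not part of the original file): the
    # result should match counting "1" digits in Python's bin().
    for n in (0, 1, 25, 2**31 - 1):
        assert count_set_bits(n) == bin(n).count("1")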
| 28 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # re-runs this very file under torchrun, triggering the __main__ block
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
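# Editor's illustration (not part of the test; `max_rows` stands in for the
# largest dim-0 size across ranks): pad_across_processes effectively
# right-pads dim 0 with zeros (or left-pads when pad_first=True) so every
# process ends up with the same shape before a gather.
def _pad_dim0(t: torch.Tensor, max_rows: int, pad_first: bool = False) -> torch.Tensor:
    pad = torch.zeros(max_rows - t.shape[0], *t.shape[1:], dtype=t.dtype)
    return torch.cat([pad, t] if pad_first else [t, pad], dim=0)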
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(a_ )
_snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[Any] = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[str] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**a_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
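# Editor's usage sketch (illustrative, assumes the vision extras are installed
# and the facebook/convnextv2-tiny-1k-224 checkpoint used by the integration
# test above is reachable; `image` is any PIL image):
def _predict_label(image):
    processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
    model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]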
| 28 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase( __snake_case ):
'''simple docstring'''
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A_, """tf_padding""" ) )
self.parent.assertTrue(hasattr(A_, """depth_multiplier""" ) )
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: Optional[Any], a_: Tuple=13, a_: Optional[Any]=3, a_: int=32, a_: Dict=0.25, a_: List[str]=8, a_: Tuple=True, a_: Union[str, Any]=1_024, a_: str=32, a_: Optional[Any]="relu6", a_: Union[str, Any]=0.1, a_: Optional[int]=0.02, a_: List[str]=True, a_: List[str]=True, a_: str=10, a_: str=None, ):
'''simple docstring'''
_snake_case : Any = parent
_snake_case : Optional[int] = batch_size
_snake_case : Optional[int] = num_channels
_snake_case : Tuple = image_size
_snake_case : Any = depth_multiplier
_snake_case : Any = min_depth
_snake_case : Dict = tf_padding
_snake_case : int = int(last_hidden_size * depth_multiplier )
_snake_case : Optional[int] = output_stride
_snake_case : Any = hidden_act
_snake_case : Any = classifier_dropout_prob
_snake_case : Optional[int] = use_labels
_snake_case : List[str] = is_training
_snake_case : List[Any] = num_labels
_snake_case : Optional[Any] = initializer_range
_snake_case : Union[str, Any] = scope
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : int = None
_snake_case : Dict = None
if self.use_labels:
_snake_case : Any = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
_snake_case : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self: Dict, a_: int, a_: Optional[int], a_: Optional[int], a_: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = MobileNetVaModel(config=A_ )
model.to(A_ )
model.eval()
_snake_case : List[Any] = model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any], a_: Union[str, Any], a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.num_labels
_snake_case : List[str] = MobileNetVaForImageClassification(A_ )
model.to(A_ )
model.eval()
_snake_case : Tuple = model(A_, labels=A_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = MobileNetVaModelTester(self )
_snake_case : List[str] = MobileNetVaConfigTester(self, config_class=A_, has_text_modality=A_ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[str] = model_class(A_ )
_snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1], A_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
def check_hidden_states_output(a_: Tuple, a_: Optional[Any], a_: Any ):
_snake_case : Dict = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
_snake_case : str = model(**self._prepare_for_class(A_, A_ ) )
_snake_case : Union[str, Any] = outputs.hidden_states
_snake_case : List[str] = 26
self.assertEqual(len(A_ ), A_ )
_snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[int] = True
check_hidden_states_output(A_, A_, A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[Any] = True
check_hidden_states_output(A_, A_, A_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Tuple = MobileNetVaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(A_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Dict = prepare_img()
_snake_case : Any = image_processor(images=A_, return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
_snake_case : Any = model(**A_ )
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape, A_ )
_snake_case : Any = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A_, atol=1E-4 ) )
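# Editor's note (an assumption based on the 1_001-way logits checked above):
# Google's MobileNet checkpoints add a background class at index 0, so an
# ImageNet class i maps to logit index i + 1. The final feature map side is
# image_size // output_stride, mirroring the shape check in the model tester.
def _final_feature_side(image_size: int = 224, output_stride: int = 32) -> int:
    return image_size // output_stride  # 7 for the default 224x224 input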
| 715 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
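# Editor's sketch of the write/read round trip these tests exercise
# (`tmp_dir` is a hypothetical writable directory):
def _parquet_roundtrip(tmp_dir: str) -> Dataset:
    ds = Dataset.from_dict(
        {"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]}
    )
    ParquetDatasetWriter(ds, f"{tmp_dir}/roundtrip.parquet").write()
    return ParquetDatasetReader(f"{tmp_dir}/roundtrip.parquet", cache_dir=f"{tmp_dir}/cache").read()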
| 28 | 0 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
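# Editor's shape walkthrough (illustrative numbers, not part of the model):
# with batch size 2, clip_embeddings_dim 768, clip_extra_context_tokens 4 and
# cross_attention_dim 768, clip_extra_context_tokens_proj yields (2, 3072);
# the reshape/permute above turn that into (2, 4, 768) extra tokens that are
# concatenated in front of the projected text-encoder hidden states along the
# sequence dimension.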
| 716 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
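# Editor's sketch (illustrative): the (batch, num_labels, H, W) logits
# verified above become a per-pixel segmentation map with an argmax over the
# class dimension.
def _segmentation_map(logits: torch.Tensor) -> torch.Tensor:
    return logits.argmax(dim=1)  # (batch, H, W) tensor of class ids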
| 28 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield repo-relative paths of .py/.ipynb files, skipping scripts/ and hidden dirs."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
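# Editor's worked example (hypothetical tree): given the files
# "ciphers/caesar.py" and "ciphers/rot13.py", print_directory_md prints:
#
#   ## Ciphers
#     * [Caesar](ciphers/caesar.py)
#     * [Rot13](ciphers/rot13.py)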
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR the ciphertext with the cycled key; return None on invalid output chars."""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-lowercase-letter key and keep the decodings that stay printable."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidate plaintexts containing the given common English word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: sum of ASCII values of the decrypted text."""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
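def _xor_roundtrip_demo() -> None:
    # Editor's demonstration (not part of the Project Euler solution): XOR
    # with the key is an involution, so try_key recovers a known plaintext
    # when given the correct key.
    demo_key = (ord("a"), ord("b"), ord("c"))
    demo_cipher = [ord(ch) ^ k for ch, k in zip("the cat", cycle(demo_key))]
    assert try_key(demo_cipher, demo_key) == "the cat"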
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_a = """<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"""
    html_string_b = """\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n """
    return [html_string_a, html_string_b]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
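# Editor's sketch (illustrative; the exact whitespace handling is an
# assumption): the feature extractor walks the DOM with BeautifulSoup and
# returns each visible text node together with its absolute XPath, e.g.
#   fe = MarkupLMFeatureExtractor()
#   enc = fe("<html><body><h1>Hi</h1></body></html>")
#   enc.nodes  -> [["Hi"]], enc.xpaths -> [["/html/body/h1"]]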
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
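# Editor's usage sketch (sample inputs are assumptions; keyword names follow
# the __call__ signature above):
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44_100)
#   # `batch` merges the feature-extractor and image-processor outputs.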
| 28 | 0 |
"""simple docstring"""
def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small undirected demo graphs as an adjacency list."""
    return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """
    Return the bridges of an undirected graph given as an adjacency list.

    >>> compute_bridges(get_demo_graph(0))
    [(3, 4), (2, 3), (2, 5)]
    >>> compute_bridges(get_demo_graph(3))
    []
    """
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
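    # Editor's quick demo (uses the demo graphs defined above):
    print(compute_bridges(get_demo_graph(0)))  # [(3, 4), (2, 3), (2, 5)]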
| 719 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ByTaTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
super().setUp()
        tokenizer = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase_ ( self: List[Any], **a_: int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[Any], a_: List[Any]=False, a_: int=20, a_: Union[str, Any]=5 ):
'''simple docstring'''
_snake_case : List[Any] = []
for i in range(len(a_ ) ):
try:
_snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=a_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        _snake_case : str = list(filter(lambda t : re.match(r"""^[ a-zA-Z]+$""", t[1] ), toks ) )
        _snake_case : List[Any] = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1], add_special_tokens=a_ ), toks ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Tuple = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : Tuple = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Dict = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : Union[str, Any] = """ """ + output_txt
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = self.ta_base_tokenizer
_snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_snake_case : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""], batch_without_eos_added["""input_ids"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.ta_base_tokenizer
_snake_case : Tuple = """Unicode €."""
_snake_case : List[Any] = tokenizer(a_ )
_snake_case : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_, """Unicode €.</s>""" )
_snake_case : Tuple = tokenizer("""e è é ê ë""" )
_snake_case : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""], a_ )
# decoding
_snake_case : int = tokenizer.decode(a_ )
self.assertEqual(a_, """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ), """e è é ê ë</s>""" )
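        # Editor's note: ByT5 tokenizes raw UTF-8 bytes with an offset of 3 reserved for
        # the special ids (pad=0, eos=1, unk=2), so "U" (byte 85) maps to id 88 above and
        # the euro sign expands to its three UTF-8 bytes 226/130/172 -> ids 229/133/175.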
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.ta_base_tokenizer
_snake_case : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_snake_case : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_snake_case : int = tokenizer(a_, padding=a_, return_tensors=a_ )
self.assertIsInstance(a_, a_ )
if FRAMEWORK != "jax":
_snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
_snake_case : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a_, a_ )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors=a_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""", a_ )
self.assertIn("""attention_mask""", a_ )
self.assertNotIn("""decoder_input_ids""", a_ )
self.assertNotIn("""decoder_attention_mask""", a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.ta_base_tokenizer
_snake_case : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
_snake_case : Optional[int] = tokenizer(
text_target=a_, max_length=32, padding="""max_length""", truncation=a_, return_tensors=a_ )
self.assertEqual(32, targets["""input_ids"""].shape[1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.ta_base_tokenizer
_snake_case : Optional[int] = ["""A long paragraph for summarization. </s>"""]
_snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
_snake_case : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_snake_case : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_snake_case : Optional[Any] = tokenizer(a_, text_target=a_ )
self.assertEqual(a_, batch["""input_ids"""][0] )
self.assertEqual(a_, batch["""labels"""][0] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : List[str] = tempfile.mkdtemp()
_snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : List[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : Dict = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
shutil.rmtree(a_ )
_snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case : Union[str, Any] = tempfile.mkdtemp()
_snake_case : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_snake_case : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_snake_case : Any = tokenizer.encode(a_, add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(a_ )
_snake_case : str = after_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
self.assertIn("""new_additional_special_token""", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
_snake_case : Optional[int] = tokenizer.__class__.from_pretrained(a_, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_, """special_tokens_map.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : Union[str, Any] = json.load(a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), encoding="""utf-8""" ) as json_file:
_snake_case : List[Any] = json.load(a_ )
_snake_case : int = [f"<extra_id_{i}>" for i in range(125 )]
_snake_case : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a_, """special_tokens_map.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
with open(os.path.join(a_, """tokenizer_config.json""" ), """w""", encoding="""utf-8""" ) as outfile:
json.dump(a_, a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case : Optional[int] = tokenizer_class.from_pretrained(
a_, )
self.assertIn(
"""an_additional_special_token""", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case : Union[str, Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""", lstrip=a_ )]
_snake_case : List[Any] = tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, )
self.assertIn("""a_new_additional_special_token""", tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ), )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
_snake_case : Optional[Any] = tokenizer_class.from_pretrained(a_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_snake_case : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_snake_case : Any = 0
_snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(
a_, skip_special_tokens=a_ )
for attr in attributes_list:
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, attr + """_id""", a_ )
self.assertEqual(getattr(a_, a_ ), a_ )
self.assertEqual(getattr(a_, attr + """_id""" ), a_ )
setattr(a_, """additional_special_tokens_ids""", [] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [] )
setattr(a_, """additional_special_tokens_ids""", [token_id_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens""" ), [token_to_test_setters] )
self.assertListEqual(getattr(a_, """additional_special_tokens_ids""" ), [token_id_to_test_setters] )
| 28 | 0 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCAmelCase__ (snake_case__ : int=32 , snake_case__ : List[str]=10 , snake_case__ : Optional[int]=1_00 , snake_case__ : List[Any]=10_26 , snake_case__ : List[str]=True , snake_case__ : Optional[int]="data/tokenized_stories_train_wikitext103.jbl" , snake_case__ : List[str]="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
_snake_case , _snake_case : Tuple = generate_datasets(
_UpperCamelCase , _UpperCamelCase , number=_UpperCamelCase , min_len=10_26 , trim=_UpperCamelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_snake_case : int = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
_snake_case : List[Any] = load_gpta("""gpt2""" ).to(_UpperCamelCase )
print("""computing perplexity on objective set""" )
_snake_case : Optional[int] = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).item()
print("""perplexity on objective set:""" , _UpperCamelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int]=15 , snake_case__ : List[Any]=1_28 , snake_case__ : Any=1_00 , snake_case__ : Union[str, Any]="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
_snake_case : List[str] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
_snake_case : Optional[Any] = SecondaryLearner(_UpperCamelCase )
# Train secondary learner
_snake_case : Dict = train_secondary_learner(
_UpperCamelCase , _UpperCamelCase , max_epochs=_UpperCamelCase , batch_size=_UpperCamelCase , eval_freq=1_00 , igf_model_path=_UpperCamelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : List[Any]=32 , snake_case__ : Any=10_00 , snake_case__ : List[Any]=16 , snake_case__ : List[str]=1.0 , snake_case__ : List[Any]=recopy_gpta , snake_case__ : List[Any]=None , snake_case__ : int=10 , snake_case__ : Any="gpt2_finetuned.pt" , ):
"""simple docstring"""
_snake_case : List[Any] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
_snake_case : str = RandomSampler(_UpperCamelCase )
_snake_case : Optional[Any] = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase )
_snake_case : List[str] = max_steps // (len(_UpperCamelCase )) + 1
_snake_case : int = 0
_snake_case : Optional[int] = torch.zeros((1, context_len) , dtype=torch.long , device=_UpperCamelCase )
_snake_case , _snake_case , _snake_case : Optional[Any] = recopy_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(_UpperCamelCase )
secondary_learner.eval()
_snake_case : Optional[int] = []
_snake_case : Optional[int] = 0
_snake_case : Optional[int] = []
_snake_case : Tuple = []
# Compute the performance of the transformer model at the beginning
_snake_case : Tuple = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
test_perps.append(_UpperCamelCase )
print("""Test perplexity, step""" , _UpperCamelCase , """:""" , _UpperCamelCase )
for epoch in range(int(_UpperCamelCase ) ):
for step, example in enumerate(_UpperCamelCase ):
torch.cuda.empty_cache()
_snake_case : Optional[Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
_snake_case : Any = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_snake_case : List[Any] = model(_UpperCamelCase , labels=_UpperCamelCase )
_snake_case : Optional[int] = True
if secondary_learner is not None:
_snake_case : List[str] = secondary_learner.forward(
torch.tensor(_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_UpperCamelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_snake_case : Optional[Any] = -1
if predicted_q < threshold:
_snake_case : Optional[int] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_snake_case : Union[str, Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_snake_case : int = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_snake_case : Any = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
test_perps.append(_UpperCamelCase )
print("""Test perplexity, step""" , _UpperCamelCase , """:""" , _UpperCamelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _UpperCamelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
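# Editor's sketch (an assumption, not part of the original script): the filtering step
# above keeps a context for backprop only while the secondary learner's predicted
# information gain clears the threshold, which is dropped to -1 after 10 batches:
#
#     def keep_context(predicted_q: float, global_step: int, threshold: float = 1.0) -> bool:
#         if global_step >= 10:
#             threshold = -1  # filter fully relaxed from here on
#         return predicted_q >= threshold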
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=_UpperCamelCase , default=_UpperCamelCase , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=_UpperCamelCase , default=_UpperCamelCase , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=_UpperCamelCase , type=_UpperCamelCase , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=_UpperCamelCase , default=_UpperCamelCase , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=_UpperCamelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=1_00 , type=_UpperCamelCase , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=1_00 , type=_UpperCamelCase , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=10_00 , type=_UpperCamelCase , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=1_28 , type=_UpperCamelCase , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=_UpperCamelCase , help="""batch size of training data of language model(gpt2) """ )
    parser.add_argument(
        """--eval_interval""" , default=10 , type=_UpperCamelCase , help=(
            """decay the selectivity of our secondary learner filter from """
            """1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=1_00 , type=_UpperCamelCase , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=10_26 , type=_UpperCamelCase , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=_UpperCamelCase , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=_UpperCamelCase , type=_UpperCamelCase , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=_UpperCamelCase , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=_UpperCamelCase , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=_UpperCamelCase , type=_UpperCamelCase , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_UpperCamelCase , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
_snake_case : List[Any] = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
_snake_case : Optional[Any] = training_secondary_learner(
_UpperCamelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
_snake_case : Union[str, Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_snake_case , _snake_case : Union[str, Any] = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_00 , min_len=10_26 , trim=_UpperCamelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_UpperCamelCase , secondary_learner=_UpperCamelCase , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 720 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase( __a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
raise NotImplementedError()
| 28 | 0 |
"""simple docstring"""
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph : dict , start , goal ) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes via breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph : dict , start , target ) -> int:
    """Find the number of edges on the shortest path between `start` and `target`; -1 if unreachable."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
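    # Editor's note: BFS expands nodes in order of increasing hop count, so the first
    # path to reach the goal is guaranteed shortest in an unweighted graph; both helpers
    # therefore visit O(V + E) nodes. `queue.pop(0 )` on a plain list costs O(n) per pop,
    # so `collections.deque` with `popleft()` would be the constant-time alternative:
    #
    #     from collections import deque
    #     queue = deque([[start]] )
    #     path = queue.popleft()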
| 721 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "roformer"
def __init__( self: List[str], a_: Tuple=50_000, a_: Optional[Any]=None, a_: List[str]=768, a_: Union[str, Any]=12, a_: Optional[int]=12, a_: Optional[Any]=3_072, a_: List[str]="gelu", a_: List[str]=0.1, a_: Tuple=0.1, a_: Optional[int]=1_536, a_: Any=2, a_: Optional[int]=0.02, a_: Tuple=1E-12, a_: Dict=0, a_: str=False, a_: Dict=True, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=a_, **a_ )
_snake_case : int = vocab_size
_snake_case : int = hidden_size if embedding_size is None else embedding_size
_snake_case : Dict = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Dict = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Tuple = type_vocab_size
_snake_case : List[Any] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : Optional[Any] = rotary_value
_snake_case : List[str] = use_cache
class lowercase( __a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : List[str] = {0: """batch""", 1: """sequence"""}
_snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
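        # Editor's note: the dynamic-axes mapping above marks which input dimensions may
        # vary at runtime (batch and sequence length, plus the choice axis for
        # multiple-choice tasks), so the exported ONNX graph is not pinned to one shape.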
| 28 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = KandinskyInpaintPipeline
lowercase__ = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowercase__ = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowercase__ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase__ = False
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_005, )
_snake_case : int = MultilingualCLIP(a_ )
_snake_case : Optional[int] = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : int = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_snake_case : str = UNetaDConditionModel(**a_ )
return model
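        # Editor's note: `in_channels = 9` above matches the usual inpainting layout --
        # 4 noisy latent channels concatenated with 4 masked-image latent channels plus
        # 1 mask channel before entering the UNet (our reading, not stated in the test).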
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Any = self.dummy_text_encoder
_snake_case : Optional[int] = self.dummy_tokenizer
_snake_case : List[Any] = self.dummy_unet
_snake_case : Tuple = self.dummy_movq
_snake_case : List[Any] = DDIMScheduler(
num_train_timesteps=1_000, beta_schedule="""linear""", beta_start=0.00_085, beta_end=0.012, clip_sample=a_, set_alpha_to_one=a_, steps_offset=1, prediction_type="""epsilon""", thresholding=a_, )
_snake_case : int = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase_ ( self: Optional[int], a_: Tuple, a_: Any=0 ):
'''simple docstring'''
_snake_case : int = floats_tensor((1, self.cross_attention_dim), rng=random.Random(a_ ) ).to(a_ )
_snake_case : Tuple = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(a_ )
# create init_image
_snake_case : Any = floats_tensor((1, 3, 64, 64), rng=random.Random(a_ ) ).to(a_ )
_snake_case : int = image.cpu().permute(0, 2, 3, 1 )[0]
_snake_case : int = Image.fromarray(np.uinta(a_ ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
_snake_case : List[Any] = np.ones((64, 64), dtype=np.floataa )
_snake_case : Union[str, Any] = 0
if str(a_ ).startswith("""mps""" ):
_snake_case : Optional[Any] = torch.manual_seed(a_ )
else:
_snake_case : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
_snake_case : List[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = """cpu"""
_snake_case : Dict = self.get_dummy_components()
_snake_case : Any = self.pipeline_class(**a_ )
_snake_case : Any = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(a_ ) )
_snake_case : Optional[int] = output.images
_snake_case : Any = pipe(
**self.get_dummy_inputs(a_ ), return_dict=a_, )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
_snake_case : Any = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
_snake_case : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_snake_case : str = np.ones((768, 768), dtype=np.floataa )
_snake_case : List[str] = 0
_snake_case : Optional[int] = """a hat"""
_snake_case : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""", torch_dtype=torch.floataa )
pipe_prior.to(a_ )
_snake_case : Any = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""", torch_dtype=torch.floataa )
_snake_case : int = pipeline.to(a_ )
pipeline.set_progress_bar_config(disable=a_ )
_snake_case : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
_snake_case , _snake_case : Optional[Any] = pipe_prior(
a_, generator=a_, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
_snake_case : int = pipeline(
a_, image=a_, mask_image=a_, image_embeds=a_, negative_image_embeds=a_, generator=a_, num_inference_steps=100, height=768, width=768, output_type="""np""", )
_snake_case : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a_, a_ )
| 700 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
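# Editor's note: the path helper above (invoked as `shave_segments` elsewhere in this
# script) trims dot-separated prefix segments from a parameter path, e.g.
# shave_segments("input_blocks.0.0.weight", 1) -> "0.0.weight", while a negative count
# instead drops that many trailing segments.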
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[Any]=0 ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for old_item in old_list:
_snake_case : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
_snake_case : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
_snake_case : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
_snake_case : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
_snake_case : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_snake_case : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
_snake_case : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict=0 ):
"""simple docstring"""
_snake_case : Dict = []
for old_item in old_list:
_snake_case : Dict = old_item
_snake_case : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
_snake_case : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_snake_case : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_snake_case : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : List[str]=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case : Union[str, Any] = old_checkpoint[path]
_snake_case : Optional[int] = old_tensor.shape[0] // 3
_snake_case : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_snake_case , _snake_case , _snake_case : List[str] = old_tensor.split(channels // num_heads , dim=1 )
_snake_case : Union[str, Any] = query.reshape(snake_case__ )
_snake_case : Tuple = key.reshape(snake_case__ )
_snake_case : Any = value.reshape(snake_case__ )
for path in paths:
_snake_case : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_snake_case : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_snake_case : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case : Optional[Any] = old_checkpoint[path["""old"""]]
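# Editor's note: the attention-splitting branch above slices a fused qkv tensor
# (3 * channels along dim 0, grouped per attention head) into separate query, key and
# value tensors before the per-path renaming is applied to the checkpoint keys.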
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Tuple = checkpoint["""time_embed.0.weight"""]
_snake_case : List[str] = checkpoint["""time_embed.0.bias"""]
_snake_case : List[str] = checkpoint["""time_embed.2.weight"""]
_snake_case : Tuple = checkpoint["""time_embed.2.bias"""]
_snake_case : Dict = checkpoint["""input_blocks.0.0.weight"""]
_snake_case : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
_snake_case : List[Any] = checkpoint["""out.0.weight"""]
_snake_case : Any = checkpoint["""out.0.bias"""]
_snake_case : Any = checkpoint["""out.2.weight"""]
_snake_case : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_snake_case : Any = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
_snake_case : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_snake_case : Optional[int] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
_snake_case : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_snake_case : List[Any] = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
_snake_case : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case : int = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_snake_case : str = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_snake_case : Union[str, Any] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_snake_case : Dict = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_snake_case : Optional[int] = renew_resnet_paths(snake_case__ )
_snake_case : int = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_snake_case : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : List[str] = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : Optional[int] = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
_snake_case : int = middle_blocks[0]
_snake_case : List[str] = middle_blocks[1]
_snake_case : Any = middle_blocks[2]
_snake_case : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
_snake_case : Dict = renew_attention_paths(snake_case__ )
_snake_case : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
_snake_case : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
_snake_case : Dict = i % (config["""num_res_blocks"""] + 1)
_snake_case : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
_snake_case : Any = {}
for layer in output_block_layers:
_snake_case , _snake_case : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
_snake_case : str = [layer_name]
if len(snake_case__ ) > 1:
_snake_case : Dict = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_snake_case : List[str] = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_snake_case : List[Any] = renew_resnet_paths(snake_case__ )
_snake_case : int = renew_resnet_paths(snake_case__ )
_snake_case : Optional[Any] = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_snake_case : Any = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_snake_case : Optional[int] = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
_snake_case : Any = []
if len(snake_case__ ):
_snake_case : str = renew_attention_paths(snake_case__ )
_snake_case : str = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_snake_case : int = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
_snake_case : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
_snake_case : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
_snake_case : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A_ = parser.parse_args()
A_ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A_ = json.loads(f.read())
A_ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A_ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
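# Editor's note: routing attribute access through `_LazyModule` defers the heavy
# torch-dependent imports until a name is actually used, while the `TYPE_CHECKING`
# branch re-imports the same names eagerly so static analyzers still resolve them.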
| 701 |
"""simple docstring"""
from typing import Any
def mode(input_list : list ) -> list[Any]:
    """Return the sorted mode(s) of `input_list`; an empty input yields []."""
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
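    # Editor's sketch (added examples, not part of the original module):
    print(mode([1, 2, 2, 3] ) )  # [2]
    print(mode([1, 1, 2, 2] ) )  # [1, 2] -- all tied modes, sorted
    print(mode([] ) )            # []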
| 28 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowercase( a__ ):
'''simple docstring'''
lowercase__ = """informer"""
lowercase__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self: Optional[Any], a_: Union[str, Any] = None, a_: List[str] = None, a_: int = "student_t", a_: Union[str, Any] = "nll", a_: int = 1, a_: Optional[Any] = None, a_: Optional[int] = "mean", a_: Any = 0, a_: str = 0, a_: str = 0, a_: Union[str, Any] = 0, a_: Dict = None, a_: List[Any] = None, a_: Dict = 64, a_: Optional[Any] = 32, a_: List[str] = 32, a_: int = 2, a_: List[str] = 2, a_: Union[str, Any] = 2, a_: Union[str, Any] = 2, a_: List[Any] = True, a_: Optional[Any] = "gelu", a_: Dict = 0.05, a_: str = 0.1, a_: Any = 0.1, a_: Any = 0.1, a_: Optional[Any] = 0.1, a_: List[Any] = 100, a_: List[str] = 0.02, a_: Tuple=True, a_: Tuple = "prob", a_: Tuple = 5, a_: Optional[Any] = True, **a_: Dict, ):
'''simple docstring'''
_snake_case : List[str] = prediction_length
_snake_case : Any = context_length or prediction_length
_snake_case : Any = distribution_output
_snake_case : Tuple = loss
_snake_case : Dict = input_size
_snake_case : List[Any] = num_time_features
_snake_case : Tuple = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_snake_case : str = scaling
_snake_case : Optional[Any] = num_dynamic_real_features
_snake_case : Dict = num_static_real_features
_snake_case : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowercase__ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
_snake_case : Union[str, Any] = cardinality
else:
_snake_case : int = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowercase__ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
_snake_case : List[Any] = embedding_dimension
else:
_snake_case : List[str] = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]
_snake_case : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
_snake_case : int = input_size * len(self.lags_sequence ) + self._number_of_features
_snake_case : Union[str, Any] = d_model
_snake_case : str = encoder_attention_heads
_snake_case : Tuple = decoder_attention_heads
_snake_case : Tuple = encoder_ffn_dim
_snake_case : Optional[int] = decoder_ffn_dim
_snake_case : List[str] = encoder_layers
_snake_case : int = decoder_layers
_snake_case : Dict = dropout
_snake_case : List[str] = attention_dropout
_snake_case : Optional[int] = activation_dropout
_snake_case : List[Any] = encoder_layerdrop
_snake_case : Union[str, Any] = decoder_layerdrop
_snake_case : Tuple = activation_function
_snake_case : Union[str, Any] = init_std
_snake_case : Union[str, Any] = use_cache
# Informer
_snake_case : List[Any] = attention_type
_snake_case : Optional[int] = sampling_factor
_snake_case : Tuple = distil
super().__init__(is_encoder_decoder=lowercase__, **lowercase__ )
@property
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
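        # Editor's note: this sum counts every per-step covariate appended to the lagged
        # target values (static categorical embeddings, dynamic/static real features,
        # time features, and the two scaling statistics), and together with
        # input_size * len(lags_sequence) it fixes the encoder's input feature width.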
| 702 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "bridgetower_vision_model"
def __init__( self: Tuple, a_: str=768, a_: Union[str, Any]=12, a_: List[str]=3, a_: Optional[int]=16, a_: List[Any]=288, a_: Optional[Any]=1, a_: Any=1E-05, a_: Dict=False, a_: Any=True, a_: int=False, **a_: int, ):
'''simple docstring'''
super().__init__(**a_ )
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = patch_size
_snake_case : Dict = image_size
_snake_case : Optional[Any] = initializer_factor
_snake_case : Any = layer_norm_eps
_snake_case : int = stop_gradient
_snake_case : Any = share_layernorm
_snake_case : List[Any] = remove_last_layer
@classmethod
    def UpperCamelCase_ ( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ):
'''simple docstring'''
        _snake_case , _snake_case : List[Any] = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
if config_dict.get("""model_type""" ) == "bridgetower":
            _snake_case : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class lowercase( PretrainedConfig ):
'''simple docstring'''
lowercase__ = "bridgetower_text_model"
    def __init__( self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
'''simple docstring'''
        super().__init__(**kwargs )
_snake_case : str = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[int] = hidden_act
_snake_case : List[Any] = initializer_factor
_snake_case : Optional[int] = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : List[str] = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[Any] = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Dict = use_cache
_snake_case : int = pad_token_id
_snake_case : Union[str, Any] = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
@classmethod
    def UpperCamelCase_ ( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ):
'''simple docstring'''
        _snake_case , _snake_case : Optional[int] = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
if config_dict.get("""model_type""" ) == "bridgetower":
_snake_case : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs )
class lowercase( PretrainedConfig ):
'''simple docstring'''
lowercase__ = "bridgetower"
    def __init__( self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1E-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
'''simple docstring'''
        _snake_case : str = kwargs.pop("""text_config_dict""", None )
        _snake_case : Optional[Any] = kwargs.pop("""vision_config_dict""", None )
        super().__init__(**kwargs )
_snake_case : str = share_cross_modal_transformer_layers
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = hidden_size
_snake_case : Union[str, Any] = initializer_factor
_snake_case : Dict = layer_norm_eps
_snake_case : Dict = share_link_tower_layers
_snake_case : Optional[int] = link_tower_type
_snake_case : Any = num_attention_heads
_snake_case : int = num_hidden_layers
_snake_case : int = tie_word_embeddings
_snake_case : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
_snake_case : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_snake_case : str = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        _snake_case : Any = BridgeTowerTextConfig(**text_config )
        _snake_case : List[Any] = BridgeTowerVisionConfig(**vision_config )
@classmethod
    def UpperCamelCase_ ( cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs ):
'''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : str = self.text_config.to_dict()
_snake_case : List[str] = self.vision_config.to_dict()
_snake_case : Tuple = self.__class__.model_type
return output
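# Hedged usage sketch (editor addition): the three classes above mirror the upstream
# `transformers` BridgeTower configs; a combined config can be built from its two halves
# (the `from_text_vision_configs` helper name is assumed from the upstream API):
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

_text_cfg = BridgeTowerTextConfig(vocab_size=50_265)
_vision_cfg = BridgeTowerVisionConfig(image_size=288)
_cfg = BridgeTowerConfig.from_text_vision_configs(_text_cfg, _vision_cfg)
assert _cfg.to_dict()["text_config"]["vocab_size"] == 50_265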
| 28 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowercase( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Tuple = 32
_snake_case : Union[str, Any] = embedder_hidden_size
# image encoding components
_snake_case : int = CLIPImageProcessor(crop_size=32, size=32 )
torch.manual_seed(0 )
_snake_case : Any = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowercase__, projection_dim=lowercase__, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
# regular denoising components
torch.manual_seed(0 )
_snake_case : Union[str, Any] = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ )
_snake_case : Dict = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_snake_case : Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowercase__, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ) )
torch.manual_seed(0 )
_snake_case : Optional[Any] = UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="""projection""", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowercase__, layers_per_block=1, upcast_attention=lowercase__, use_linear_projection=lowercase__, )
torch.manual_seed(0 )
_snake_case : Tuple = DDIMScheduler(
beta_schedule="""scaled_linear""", beta_start=0.00_085, beta_end=0.012, prediction_type="""v_prediction""", set_alpha_to_one=lowercase__, steps_offset=1, )
torch.manual_seed(0 )
_snake_case : int = AutoencoderKL()
_snake_case : str = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
    def UpperCamelCase_ ( self, device, seed=0, pil_image=True ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            _snake_case : List[Any] = torch.manual_seed(seed )
        else:
            _snake_case : Dict = torch.Generator(device=device ).manual_seed(seed )
        _snake_case : List[Any] = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
if pil_image:
_snake_case : Tuple = input_image * 0.5 + 0.5
_snake_case : Optional[int] = input_image.clamp(0, 1 )
_snake_case : Dict = input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
_snake_case : Tuple = DiffusionPipeline.numpy_to_pil(lowercase__ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case : Union[str, Any] = self.get_dummy_components()
_snake_case : str = StableUnCLIPImgaImgPipeline(**lowercase__ )
_snake_case : Dict = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
_snake_case : int = self.get_dummy_inputs(lowercase__ )
inputs.update({"""image_embeds""": None} )
_snake_case : int = sd_pipe(**lowercase__ ).images
_snake_case : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case : Optional[int] = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : int = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowercase__ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase__ )
@slow
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_snake_case : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
_snake_case : str = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""", torch_dtype=torch.floataa )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_snake_case : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _snake_case : List[str] = pipe(lowercase__, """anime turtle""", generator=lowercase__, output_type="""np""" )
_snake_case : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__, lowercase__ )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_snake_case : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
_snake_case : Dict = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""", torch_dtype=torch.floataa )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_snake_case : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _snake_case : Dict = pipe(lowercase__, """anime turtle""", generator=lowercase__, output_type="""np""" )
_snake_case : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__, lowercase__ )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_snake_case : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""", torch_dtype=torch.floataa )
_snake_case : List[str] = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_snake_case : str = pipe(
lowercase__, """anime turtle""", num_inference_steps=2, output_type="""np""", )
_snake_case : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
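# Hedged sketch (editor addition): the memory-saving pattern the slow tests above rely on,
# reusable for any checkpoint; class/attribute names follow this record's imports, and a
# CUDA device plus network access are assumed.
def _load_img2img_pipeline_with_memory_savings(checkpoint="fusing/stable-unclip-2-1-l-img2img"):
    pipe = StableUnCLIPImgaImgPipeline.from_pretrained(checkpoint, torch_dtype=torch.floataa)
    pipe.enable_attention_slicing()  # compute attention in slices to cap peak memory
    pipe.enable_sequential_cpu_offload()  # keep sub-models on CPU until each is needed
    return pipe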
| 703 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" )
return image
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
_snake_case : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Optional[int] = val
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_snake_case : Optional[int] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
_snake_case : Tuple = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
_snake_case : List[str] = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) )
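        # the key projection carries no bias in the original fused qkv attention,
        # hence the zeros concatenated between the q and v biases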
_snake_case : Dict = qkv_bias
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : List[Any] = 3_64 if """coco""" in model_name else 2_24
_snake_case : List[str] = BlipaVisionConfig(image_size=snake_case__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "opt-6.7b" in model_name:
_snake_case : List[str] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case__ ).to_dict()
elif "t5-xl" in model_name:
_snake_case : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_snake_case : List[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_snake_case : int = BlipaConfig(vision_config=snake_case__ , text_config=snake_case__ )
return config, image_size
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int=None , snake_case__ : str=False ):
"""simple docstring"""
_snake_case : List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_snake_case : str = tokenizer("""\n""" , add_special_tokens=snake_case__ ).input_ids[0]
_snake_case , _snake_case : Dict = get_blipa_config(snake_case__ , eos_token_id=snake_case__ )
_snake_case : str = BlipaForConditionalGeneration(snake_case__ ).eval()
_snake_case : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_snake_case , _snake_case : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case , _snake_case , _snake_case : Any = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_snake_case : Any = original_model.state_dict()
_snake_case : Dict = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_snake_case : str = state_dict.pop(snake_case__ )
if key.startswith("""Qformer.bert""" ):
_snake_case : str = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_snake_case : Any = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_snake_case : List[str] = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_snake_case : Optional[Any] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_snake_case : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_snake_case : List[Any] = key.replace("""t5""" , """language""" )
_snake_case : str = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
_snake_case , _snake_case : List[str] = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_snake_case : Any = load_demo_image()
_snake_case : str = vis_processors["""eval"""](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
_snake_case : List[Any] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(snake_case__ )
# create processor
_snake_case : Any = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
_snake_case : int = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
_snake_case : Any = processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
_snake_case : str = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_snake_case : int = hf_model(snake_case__ , snake_case__ ).logits
else:
_snake_case : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_snake_case : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_snake_case : Union[str, Any] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_snake_case : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_snake_case : Union[str, Any] = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=snake_case__ )
else:
# cast to same type
_snake_case : int = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_snake_case : Any = """"""
_snake_case : str = tokenizer(snake_case__ , return_tensors="""pt""" ).input_ids.to(snake_case__ )
_snake_case : Union[str, Any] = original_model.generate({"""image""": original_pixel_values} )
_snake_case : Tuple = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , snake_case__ )
_snake_case : Optional[Any] = input_ids.shape[1]
_snake_case : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
_snake_case : Optional[Any] = [text.strip() for text in output_text]
print("""HF generation:""" , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
A_ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
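# Hedged usage sketch (editor addition; the script's filename is assumed, not given in this record):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub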
| 28 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[int] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_snake_case : Any = dict(zip(__snake_case, range(len(__snake_case ) ) ) )
_snake_case : Optional[Any] = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_snake_case : List[Any] = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16_000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_snake_case : int = tempfile.mkdtemp()
_snake_case : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Tuple = os.path.join(self.tmpdirname, __snake_case )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.feature_extraction_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
# load decoder from hub
_snake_case : Dict = '''hf-internal-testing/ngram-beam-search-decoder'''
    def UpperCamelCase_ ( self, **kwargs_init ):
        '''simple docstring'''
        _snake_case : Dict = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def UpperCamelCase_ ( self, **kwargs ):
        '''simple docstring'''
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs )
    def UpperCamelCase_ ( self, **kwargs ):
        '''simple docstring'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.get_tokenizer()
_snake_case : Tuple = self.get_feature_extractor()
_snake_case : Optional[int] = self.get_decoder()
_snake_case : str = WavaVecaProcessorWithLM(tokenizer=__snake_case, feature_extractor=__snake_case, decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
_snake_case : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, __snake_case )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Dict = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_snake_case : Any = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case, """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_feature_extractor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : Any = self.get_decoder()
_snake_case : Any = WavaVecaProcessorWithLM(tokenizer=__snake_case, feature_extractor=__snake_case, decoder=__snake_case )
_snake_case : Tuple = floats_list((3, 1_000) )
_snake_case : Union[str, Any] = feature_extractor(__snake_case, return_tensors="""np""" )
_snake_case : Optional[Any] = processor(__snake_case, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_feature_extractor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : Optional[Any] = self.get_decoder()
_snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case, feature_extractor=__snake_case, decoder=__snake_case )
_snake_case : Union[str, Any] = '''This is a test string'''
_snake_case : Optional[int] = processor(text=__snake_case )
_snake_case : Dict = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
    def UpperCamelCase_ ( self, shape=(2, 10, 16), seed=77 ):
        '''simple docstring'''
        np.random.seed(seed )
        return np.random.rand(*shape )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Any = self.get_feature_extractor()
_snake_case : int = self.get_tokenizer()
_snake_case : int = self.get_decoder()
_snake_case : int = WavaVecaProcessorWithLM(tokenizer=__snake_case, feature_extractor=__snake_case, decoder=__snake_case )
_snake_case : Dict = self._get_dummy_logits(shape=(10, 16), seed=13 )
_snake_case : Optional[Any] = processor.decode(__snake_case )
_snake_case : Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual("""</s> <s> </s>""", decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
    def UpperCamelCase_ ( self, pool_context ):
'''simple docstring'''
_snake_case : str = self.get_feature_extractor()
_snake_case : str = self.get_tokenizer()
_snake_case : Any = self.get_decoder()
_snake_case : Any = WavaVecaProcessorWithLM(tokenizer=__snake_case, feature_extractor=__snake_case, decoder=__snake_case )
_snake_case : Tuple = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            _snake_case : int = processor.batch_decode(logits )
        else:
            with get_context(pool_context ).Pool() as pool:
                _snake_case : List[str] = processor.batch_decode(logits, pool )
_snake_case : List[Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
_snake_case : int = decoder.decode_beams_batch(__snake_case, __snake_case )
        _snake_case , _snake_case , _snake_case : Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case, decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""], decoded_processor.text )
self.assertListEqual(__snake_case, decoded_processor.logit_score )
self.assertListEqual(__snake_case, decoded_processor.lm_score )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : str = self.get_feature_extractor()
_snake_case : int = self.get_tokenizer()
_snake_case : List[Any] = self.get_decoder()
_snake_case : str = WavaVecaProcessorWithLM(tokenizer=__snake_case, feature_extractor=__snake_case, decoder=__snake_case )
_snake_case : Dict = self._get_dummy_logits()
_snake_case : Optional[int] = 15
_snake_case : Optional[Any] = -20.0
_snake_case : int = -4.0
_snake_case : int = processor.batch_decode(
__snake_case, beam_width=__snake_case, beam_prune_logp=__snake_case, token_min_logp=__snake_case, )
_snake_case : List[str] = decoded_processor_out.text
_snake_case : List[str] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
_snake_case : List[Any] = decoder.decode_beams_batch(
__snake_case, __snake_case, beam_width=__snake_case, beam_prune_logp=__snake_case, token_min_logp=__snake_case, )
_snake_case : Dict = [d[0][0] for d in decoded_decoder_out]
_snake_case : Optional[int] = [d[0][2] for d in decoded_decoder_out]
_snake_case : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case, __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""], __snake_case )
self.assertTrue(np.array_equal(__snake_case, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447], __snake_case, atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474], __snake_case, atol=1E-3 ) )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[str] = self.get_feature_extractor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : List[str] = self.get_decoder()
_snake_case : str = WavaVecaProcessorWithLM(tokenizer=__snake_case, feature_extractor=__snake_case, decoder=__snake_case )
_snake_case : Dict = self._get_dummy_logits()
_snake_case : str = 2.0
_snake_case : Any = 5.0
_snake_case : Optional[Any] = -20.0
_snake_case : List[Any] = True
_snake_case : List[Any] = processor.batch_decode(
__snake_case, alpha=__snake_case, beta=__snake_case, unk_score_offset=__snake_case, lm_score_boundary=__snake_case, )
_snake_case : Dict = decoded_processor_out.text
_snake_case : List[Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case, beta=__snake_case, unk_score_offset=__snake_case, lm_score_boundary=__snake_case, )
with get_context("""fork""" ).Pool() as pool:
_snake_case : Dict = decoder.decode_beams_batch(
__snake_case, __snake_case, )
_snake_case : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case, __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""], __snake_case )
_snake_case : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -20.0 )
self.assertEqual(lm_model.score_boundary, __snake_case )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
_snake_case : Tuple = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_snake_case : Any = os.listdir(__snake_case )
_snake_case : Tuple = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case, __snake_case )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_snake_case : Tuple = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
_snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
_snake_case : Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_snake_case : List[Any] = os.listdir(__snake_case )
_snake_case : Any = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case, __snake_case )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : List[Any] = floats_list((3, 1_000) )
_snake_case : Optional[Any] = processor_wavaveca(__snake_case, return_tensors="""np""" )
_snake_case : int = processor_auto(__snake_case, return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1E-2 )
_snake_case : Union[str, Any] = self._get_dummy_logits()
_snake_case : Optional[Any] = processor_wavaveca.batch_decode(__snake_case )
_snake_case : Optional[Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_feature_extractor()
_snake_case : int = self.get_tokenizer()
_snake_case : Dict = self.get_decoder()
_snake_case : str = WavaVecaProcessorWithLM(tokenizer=__snake_case, feature_extractor=__snake_case, decoder=__snake_case )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg="""`processor` and `feature_extractor` model input names do not match""", )
@staticmethod
    def UpperCamelCase_ ( offsets, key ):
'''simple docstring'''
_snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : List[str] = self._get_dummy_logits()[0]
_snake_case : Any = processor.decode(__snake_case, output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case, __snake_case ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""], """word""" ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """end_offset""" ), [1, 3, 5] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Optional[int] = self._get_dummy_logits()
_snake_case : List[str] = processor.batch_decode(__snake_case, output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case, __snake_case ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__snake_case, """word""" ) ) for o in outputs["""word_offsets"""]], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """end_offset""" ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
import torch
_snake_case : Union[str, Any] = load_dataset("""common_voice""", """en""", split="""train""", streaming=__snake_case )
_snake_case : Optional[Any] = ds.cast_column("""audio""", datasets.Audio(sampling_rate=16_000 ) )
_snake_case : Dict = iter(__snake_case )
_snake_case : Tuple = next(__snake_case )
_snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_snake_case : Dict = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_snake_case : Optional[Any] = processor(sample["""audio"""]["""array"""], return_tensors="""pt""" ).input_values
with torch.no_grad():
_snake_case : Dict = model(__snake_case ).logits.cpu().numpy()
_snake_case : Dict = processor.decode(logits[0], output_word_offsets=__snake_case )
_snake_case : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_snake_case : Tuple = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_snake_case : Dict = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case, """word""" ) ), __snake_case )
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case, """word""" ) ), output.text )
# output times
_snake_case : List[Any] = torch.tensor(self.get_from_offsets(__snake_case, """start_time""" ) )
_snake_case : Any = torch.tensor(self.get_from_offsets(__snake_case, """end_time""" ) )
# fmt: off
_snake_case : Union[str, Any] = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_snake_case : Optional[Any] = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case, __snake_case, atol=0.01 ) )
self.assertTrue(torch.allclose(__snake_case, __snake_case, atol=0.01 ) )
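# Hedged sketch (editor addition): converting CTC word offsets to timestamps, as the slow
# test above does; `inputs_to_logits_ratio / sampling_rate` is the duration of one logit frame.
def _offsets_to_seconds(word_offsets, inputs_to_logits_ratio, sampling_rate=16_000):
    time_per_frame = inputs_to_logits_ratio / sampling_rate
    return [
        {"word": d["word"], "start": d["start_offset"] * time_per_frame, "end": d["end_offset"] * time_per_frame}
        for d in word_offsets
    ]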
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
    def flatten_yaml_as_dict(d : List[Any] , parent_key : Optional[Any]="" , sep : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
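# Editor's note (hedged): flatten_yaml_as_dict collapses nested mappings into dotted keys,
# e.g. {"model": {"classification": {"name": "mobilevit_v2"}}} -> {"model.classification.name": "mobilevit_v2"};
# setattr/getattr accept such dotted strings, which is why the checks below read config
# values via getattr(config, "model.classification.name").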
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
    _snake_case : List[Any] = {int(k ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case__ )
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
# remove and rename some keys of load the original model
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
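# Example invocation sketch (added for illustration; the file names below are
# hypothetical placeholders, not paths shipped with the original release):
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256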
| 28 | 0 |
"""simple docstring"""
from math import factorial
def solution(n: int = 20 ):
    """simple docstring"""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
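# Quick sanity check (added for illustration): an n x n lattice grid has
# C(2n, n) monotonic paths, so solution(20) should equal the central binomial
# coefficient C(40, 20) = 137846528820 (the classic Project Euler 15 answer).
assert solution(20) == 137846528820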
| 705 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
        simple_backend = find_backend(""" if not is_torch_available():""" )
        self.assertEqual(simple_backend, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
        self.assertEqual(double_backend, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
        self.assertEqual(triple_backend, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""", objects )
        self.assertIn("""torch_and_transformers""", objects )
        self.assertIn("""flax_and_transformers""", objects )
        self.assertIn("""torch_and_transformers_and_onnx""", objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        dummy_constant = create_dummy_object("""CONSTANT""", """'torch'""" )
        self.assertEqual(dummy_constant, """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""", """'torch'""" )
        self.assertEqual(
            dummy_function, """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""", """'torch'""" )
        self.assertEqual(dummy_class, expected_dummy_class )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""], expected_dummy_pytorch_file )
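# Illustrative expectations encoded by the tests above (hedged summary, not an
# additional test case): find_backend maps an availability guard to a backend
# key, e.g. " if not is_flax_available():" -> "flax", and create_dummy_files
# renders one autogenerated dummy module per backend combination.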
| 28 | 0 |
"""simple docstring"""
def matching_min_vertex_cover(graph: dict ):
    """simple docstring"""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph )
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges(graph: dict ):
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
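# Illustrative call (added), using the graph from the commented driver above:
#   matching_min_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]})
# returns a small vertex cover; the exact vertices can vary between runs
# because set.pop() removes an arbitrary edge on each iteration.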
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
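# Minimal usage sketch of this lazy __init__ (added; "google/owlvit-base-patch32"
# is assumed here as the public checkpoint name, and the torch/vision extras
# must be installed for these symbols to resolve):
#
#   from transformers import OwlViTProcessor, OwlViTForObjectDetection
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")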
| 28 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
        qar_model = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
        _ = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
        save_dict = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
        sas_model.load_state_dict(save_dict["""model"""] )
        _ = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_sas_model(
            model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
        wikiaab_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 1_28) , )
        wikiaab_index_flat = faiss.IndexFlatIP(1_28 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages , wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    """simple docstring"""
    elia = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
    elia_train = elia["""train_eli5"""]
    elia_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 1_28) )
    eli5_train_q_index = faiss.IndexFlatIP(1_28 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
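# Note (added for clarity): st.cache(allow_output_mutation=True) keeps the
# heavy models and faiss indexes above alive across Streamlit reruns, so they
# are loaded once per process rather than on every widget interaction.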
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    """simple docstring"""
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def make_support(question , source="wiki40b" , method="dense" , n_results=10 ):
    """simple docstring"""
    if source == "none":
        support_doc , hit_lst = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wikiaab_passages , wikiaab_gpu_index_flat , n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question , es_client , index_name="""english_wiki40b_snippets_100w""" , n_results=n_results , )
    support_list = [
        (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
    ]
    question_doc = """question: {} context: {}""".format(question , support_doc )
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case__ : None),
} )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=2_56 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    """simple docstring"""
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=10_24 , device="""cuda:0""" , )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 2_56
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('''Enter your question here:''', '''''')
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(''' & ''')
                sections = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 707 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool , use_xla: bool ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
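# Usage sketch (added for illustration): the decorator factory above either
# returns the wrapped function unchanged (eager mode) or compiles it with
# tf.function, optionally under XLA, e.g.
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)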
def random_input_ids(batch_size: int , sequence_length: int , vocab_size: int ):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class lowercase( Benchmark ):
    '''simple docstring'''
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version( self ):
        '''simple docstring'''
        return tf.__version__
    def _inference_speed( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        config = self.config_dict[model_name]
        if self.args.fpaa:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config, """architectures""" )
            and isinstance(config.architectures, list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""", fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_forward():
            return model(input_ids, training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fpaa:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config, """architectures""" )
            and isinstance(config.architectures, list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""", fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True )[0]
            gradients = tf.gradients(loss, model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True )[0]
            gradients = tf.gradients(loss, model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self, func ):
        '''simple docstring'''
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 additional times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(func, repeat=1, number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}" )
    def _measure_memory( self, func: Callable[[], None] ):
        '''simple docstring'''
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used.""" )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line.""" )
                    trace = start_memory_tracing("""transformers""" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`""" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU.""" )
                        memory = """N/A"""
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU.""" )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow.""" )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes, int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}" )
                return "N/A", None
| 28 | 0 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
A_ = logging.get_logger(__name__)
def rename_key(key ):
    """simple docstring"""
    regex = R"""\w+[.]\d+"""
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , """_""".join(pat.split(""".""" ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict ):
    """simple docstring"""
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if (
        any("""norm""" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model , init_key=42 ):
    """simple docstring"""
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split(""".""" ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
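# Hypothetical end-to-end sketch (added; the model classes below are
# illustrative placeholders, not names taken from this file):
#
#   pt_model = SomeTorchModel.from_pretrained("path/to/checkpoint")
#   flax_model = FlaxSomeModel(pt_model.config)
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)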
| 708 |
"""simple docstring"""
def find_minimum_change(denominations: list[int] , value: str ):
    """simple docstring"""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the "answer" array
    return answer
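# Worked example (added for illustration): with the default denominations
# [1, 2, 5, 10, 20, 50, 100, 500, 2000] and value "987", the greedy pass
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2], which sums to 987.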
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 28 | 0 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k , patterns ):
    """simple docstring"""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def convert_bigbird_pegasus(tf_weights: dict , config_update: dict ):
    """simple docstring"""
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
    for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})" )
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})" )
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["""model.encoder.embed_positions.weight"""] = mapping["""model.embed_positions.weight"""]
    mapping["""model.decoder.embed_positions.weight"""] = mapping.pop("""model.embed_positions.weight""" )
    missing , extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            """final_logits_bias""",
            """model.encoder.embed_tokens.weight""",
            """model.decoder.embed_tokens.weight""",
            """lm_head.weight""",
        ]
    ]
    assert unexpected_missing == [], F"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], F"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path ):
    """simple docstring"""
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["""global_step"""]
    for name, shape in tqdm(init_vars , desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path , save_dir , config_update ):
    """simple docstring"""
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
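# Example invocation sketch (added; the checkpoint path is a hypothetical
# placeholder for a TF BigBird-Pegasus checkpoint):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus-ckpt/model.ckpt-300000 \
#       --save_dir ./bigbird-pegasus-pt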
| 709 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    '''simple docstring'''
    def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        '''simple docstring'''
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        '''simple docstring'''
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
    def create_and_check_model( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values, labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""BEiT does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward( self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head( self ):
        '''simple docstring'''
        model = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="""pt""" ).pixel_values.to(torch_device )
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8_192) )
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1E-2 ) )
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1E-4 ) )
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1 ).item(), expected_class_idx )
    @slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        '''simple docstring'''
        model = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21_841) )
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1E-4 ) )
        expected_class_idx = 2_396
        self.assertEqual(logits.argmax(-1 ).item(), expected_class_idx )
    @slow
    def test_inference_semantic_segmentation( self ):
        '''simple docstring'''
        model = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
        model = model.to(torch_device )
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False )
        ds = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
        image = Image.open(ds[0]["""file"""] )
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160) )
        self.assertEqual(logits.shape, expected_shape )
        is_pillow_less_than_a = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
        if is_pillow_less_than_a:
            expected_slice = torch.tensor(
                [
                    [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
                    [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
                    [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
                ], device=torch_device, )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
                    [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
                    [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
                ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4 ) )
    @slow
    def test_post_processing_semantic_segmentation( self ):
        '''simple docstring'''
        model = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
        model = model.to(torch_device )
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False )
        ds = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
        image = Image.open(ds[0]["""file"""] )
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)] )
        expected_shape = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape, expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((160, 160) )
        self.assertEqual(segmentation[0].shape, expected_shape )
| 28 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
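# A minimal sketch of the LocalSGD pattern this script builds up to (the
# names here are illustrative; the real loop lives in training_function):
#
#     with LocalSGD(accelerator=accelerator, model=model,
#                   local_sgd_steps=8, enabled=True) as local_sgd:
#         for batch in dataloader:
#             with accelerator.accumulate(model):
#                 loss = model(**batch).loss
#                 accelerator.backward(loss)
#                 optimizer.step()
#                 lr_scheduler.step()
#                 optimizer.zero_grad()
#             local_sgd.step()  # average parameters every `local_sgd_steps`
#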
########################################################################
A_ = 16
A_ = 32
def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ):
"""simple docstring"""
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_snake_case : Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : int = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : str = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : Union[str, Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : Union[str, Any] = 16
elif accelerator.mixed_precision != "no":
_snake_case : Dict = 8
else:
_snake_case : List[Any] = None
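# Padding to a multiple of 8 (fp16/bf16) or 16 (fp8) keeps sequence lengths
# Tensor-Core friendly on NVIDIA GPUs.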
return tokenizer.pad(
__UpperCamelCase , padding="""longest""" , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
_snake_case : Any = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
_snake_case : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Optional[int] ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCamelCase ) == "1":
_snake_case : Tuple = 2
# New Code #
_snake_case : Optional[int] = int(args.gradient_accumulation_steps )
_snake_case : List[Any] = int(args.local_sgd_steps )
# Initialize accelerator
_snake_case : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCamelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Tuple = config['''lr''']
_snake_case : List[Any] = int(config["""num_epochs"""] )
_snake_case : int = int(config["""seed"""] )
_snake_case : List[Any] = int(config["""batch_size"""] )
_snake_case : List[Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(__UpperCamelCase )
_snake_case : List[Any] = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : Dict = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation; otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : Dict = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
_snake_case : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
with LocalSGD(
accelerator=__UpperCamelCase , model=__UpperCamelCase , local_sgd_steps=__UpperCamelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New Code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCamelCase ):
_snake_case : Tuple = model(**__UpperCamelCase )
_snake_case : str = output.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
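# local_sgd.step() counts optimizer steps and, once every `local_sgd_steps`
# of them, averages the model parameters across all workers.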
local_sgd.step()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : str = model(**__UpperCamelCase )
_snake_case : Dict = outputs.logits.argmax(dim=-1 )
_snake_case : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
_snake_case : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__UpperCamelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=__UpperCamelCase , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_snake_case : Optional[int] = parser.parse_args()
_snake_case : Optional[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 710 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase( __a ):
'''simple docstring'''
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def UpperCamelCase_ ( self: Tuple, a_: Optional[int]=0, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = dict(self.forward_default_kwargs )
_snake_case : Optional[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[Any] = self.dummy_sample
_snake_case : Dict = 0.1 * sample
_snake_case : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : int = self.get_scheduler_config(**a_ )
_snake_case : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[:]
if time_step is None:
_snake_case : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : Tuple = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_snake_case : Optional[Any] = dummy_past_residuals[:]
_snake_case : List[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : str = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : Optional[Any] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Optional[int] = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str, a_: Any=0, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : List[Any] = kwargs.pop("""num_inference_steps""", a_ )
_snake_case : Optional[int] = self.dummy_sample
_snake_case : Tuple = 0.1 * sample
_snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
_snake_case : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_snake_case : List[str] = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be done after setting timesteps)
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : int = new_scheduler.step(a_, a_, a_, **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self: List[Any], **a_: Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**a_ )
_snake_case : List[Any] = scheduler_class(**a_ )
_snake_case : Union[str, Any] = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Optional[Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Union[str, Any] = model(a_, a_ )
_snake_case : Any = scheduler.step(a_, a_, a_ ).prev_sample
return sample
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : int = kwargs.pop("""num_inference_steps""", a_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**a_ )
_snake_case : Dict = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_, """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_, """set_timesteps""" ):
_snake_case : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case : List[str] = dummy_past_residuals[:]
_snake_case : Optional[int] = scheduler.timesteps[5]
_snake_case : Optional[Any] = scheduler.timesteps[6]
_snake_case : str = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : List[str] = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
_snake_case : Any = scheduler.step(a_, a_, a_, **a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_, time_step=a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
A_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
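# Odd composites below 100001. This mirrors what appears to be Project Euler
# problem 46 (Goldbach's other conjecture): find odd composites that cannot
# be written as a prime plus twice a square.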
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_snake_case : Any = []
for num in range(len(snake_case__ ) ):
_snake_case : Optional[int] = 0
while 2 * i * i <= odd_composites[num]:
_snake_case : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(snake_case__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(snake_case__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: Union[str, Any] = True, a_: Any = None, a_: Dict = 32, a_: List[str] = True, a_: Optional[int] = 1 / 255, a_: Dict = True, a_: List[str] = True, a_: Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073], a_: Dict = [0.26_862_954, 0.26_130_258, 0.27_577_711], a_: str = True, a_: Any=7, a_: int=30, a_: List[str]=400, a_: Any=3, ):
'''simple docstring'''
_snake_case : Union[str, Any] = parent
_snake_case : str = do_resize
_snake_case : str = size if size is not None else {"shortest_edge": 288}
_snake_case : Tuple = size_divisor
_snake_case : Optional[int] = do_rescale
_snake_case : Tuple = rescale_factor
_snake_case : List[Any] = do_normalize
_snake_case : List[str] = do_center_crop
_snake_case : List[Any] = image_mean
_snake_case : Optional[int] = image_std
_snake_case : Any = do_pad
_snake_case : List[str] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : Union[str, Any] = max_resolution
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCamelCase_ ( self: str, a_: Optional[int], a_: str=False ):
'''simple docstring'''
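# Reimplements the processor's expected output size: scale the short side to
# `size`, cap the long side at (1333 / 800) * size, then round both sides
# down to multiples of `size_divisor`.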
if not batched:
_snake_case : int = self.size["shortest_edge"]
_snake_case : List[str] = image_inputs[0]
if isinstance(a_, Image.Image ):
_snake_case , _snake_case : Optional[Any] = image.size
else:
_snake_case , _snake_case : Dict = image.shape[1], image.shape[2]
_snake_case : Any = size / min(a_, a_ )
if h < w:
_snake_case , _snake_case : Union[str, Any] = size, scale * w
else:
_snake_case , _snake_case : Dict = scale * h, size
_snake_case : Union[str, Any] = int((1_333 / 800) * size )
if max(a_, a_ ) > max_size:
_snake_case : int = max_size / max(a_, a_ )
_snake_case : Tuple = newh * scale
_snake_case : List[Any] = neww * scale
_snake_case , _snake_case : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
_snake_case , _snake_case : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_snake_case : Union[str, Any] = []
for image in image_inputs:
_snake_case : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : Tuple = max(a_, key=lambda a_ : item[0] )[0]
_snake_case : List[Any] = max(a_, key=lambda a_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase( _A , unittest.TestCase ):
'''simple docstring'''
lowercase__ = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : str = BridgeTowerImageProcessingTester(self )
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_, """image_mean""" ) )
self.assertTrue(hasattr(a_, """image_std""" ) )
self.assertTrue(hasattr(a_, """do_normalize""" ) )
self.assertTrue(hasattr(a_, """do_resize""" ) )
self.assertTrue(hasattr(a_, """size""" ) )
self.assertTrue(hasattr(a_, """size_divisor""" ) )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_, Image.Image )
# Test not batched input
_snake_case : Union[str, Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
_snake_case : List[Any] = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case : Optional[Any] = image_processing(a_, return_tensors="""pt""" ).pixel_values
_snake_case : List[Any] = self.image_processor_tester.get_expected_values(a_, batched=a_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, np.ndarray )
# Test not batched input
_snake_case : str = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
_snake_case : int = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case : Optional[int] = image_processing(a_, return_tensors="""pt""" ).pixel_values
_snake_case : List[Any] = self.image_processor_tester.get_expected_values(a_, batched=a_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : str = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, torch.Tensor )
# Test not batched input
_snake_case : List[Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
_snake_case : List[str] = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_snake_case : str = image_processing(a_, return_tensors="""pt""" ).pixel_values
_snake_case : List[str] = self.image_processor_tester.get_expected_values(a_, batched=a_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
| 712 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: str = "cpu", a_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_snake_case : Optional[int] = device
_snake_case : str = CLIPTokenizerFast.from_pretrained(a_ )
_snake_case : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
_snake_case : Optional[int] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
_snake_case : str = torchvision.transforms.Normalize(self.image_mean, self.image_std )
_snake_case : Optional[int] = torchvision.transforms.Resize(224 )
_snake_case : str = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self: List[str], a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.resize(a_ )
_snake_case : List[Any] = self.center_crop(a_ )
_snake_case : Optional[Any] = self.normalize(a_ )
return images
def __call__( self: Any, a_: Optional[int]=None, a_: str=None, **a_: str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer(text=a_, **a_ )
_snake_case : Any = self.preprocess_img(a_ )
_snake_case : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[Any], a_: List[Any]=10, a_: Optional[Any]=0.01, a_: List[str]=None, a_: str=None, a_: Any=None, a_: Tuple=None, a_: List[str]=None, a_: List[str]=None, a_: str=False, a_: List[str]=True, a_: Any="image", a_: Optional[Any]=True, a_: Dict=False, a_: List[str]=False, a_: Optional[int]=False, ):
'''simple docstring'''
super().__init__()
_snake_case : int = None
_snake_case : List[str] = device if device else get_device()
if vqgan:
_snake_case : Any = vqgan
else:
_snake_case : Optional[Any] = load_vqgan(self.device, conf_path=a_, ckpt_path=a_ )
self.vqgan.eval()
if clip:
_snake_case : Tuple = clip
else:
_snake_case : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_snake_case : List[str] = ProcessorGradientFlow(device=self.device )
_snake_case : Union[str, Any] = iterations
_snake_case : Dict = lr
_snake_case : Optional[int] = log
_snake_case : List[str] = make_grid
_snake_case : Union[str, Any] = return_val
_snake_case : List[str] = quantize
_snake_case : List[str] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self: Tuple, a_: str=None, a_: Dict=None, a_: Dict=5, a_: Dict=True ):
'''simple docstring'''
_snake_case : Dict = []
if output_path is None:
_snake_case : Tuple = """./animation.gif"""
if input_path is None:
_snake_case : Any = self.save_path
_snake_case : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(a_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(a_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_snake_case : List[Any] = total_duration / len(a_ )
_snake_case : Optional[Any] = [frame_duration] * len(a_ )
if extend_frames:
_snake_case : Optional[int] = 1.5
_snake_case : int = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_, a_, duration=a_ )
print(f"gif saved to {output_path}" )
def UpperCamelCase_ ( self: str, a_: Tuple=None, a_: Optional[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_snake_case : int = preprocess(Image.open(a_ ), target_image_size=256 ).to(self.device )
_snake_case : int = preprocess_vqgan(a_ )
_snake_case , *_snake_case : List[Any] = self.vqgan.encode(a_ )
return z
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.latent.detach().requires_grad_()
_snake_case : Tuple = base_latent + transform_vector
if self.quantize:
_snake_case , *_snake_case : Any = self.vqgan.quantize(a_ )
else:
_snake_case : List[Any] = trans_latent
return self.vqgan.decode(a_ )
def UpperCamelCase_ ( self: List[Any], a_: Any, a_: Union[str, Any], a_: Dict=None ):
'''simple docstring'''
_snake_case : Tuple = self.clip_preprocessor(text=a_, images=a_, return_tensors="""pt""", padding=a_ )
_snake_case : Any = self.clip(**a_ )
_snake_case : str = clip_outputs.logits_per_image
if weights is not None:
_snake_case : Any = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self: Any, a_: Any, a_: List[str], a_: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""], a_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_snake_case : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""], a_, weights=neg_prompts["""weights"""] )
else:
_snake_case : Tuple = torch.tensor([1], device=self.device )
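# Log-ratio objective: drive CLIP similarity to the positive prompts up
# while pushing similarity to the negative prompts down.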
_snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def UpperCamelCase_ ( self: Optional[Any], a_: Tuple, a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
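# Gradient-based editing loop: optimize a residual `vector` added to the
# frozen source latent with Adam so the decoded image moves toward the
# prompts under the CLIP loss above, yielding an intermediate result per step.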
_snake_case : Tuple = torch.randn_like(self.latent, requires_grad=a_, device=self.device )
_snake_case : Dict = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_snake_case : str = self._add_vector(a_ )
_snake_case : List[Any] = loop_post_process(a_ )
_snake_case : List[Any] = self._get_CLIP_loss(a_, a_, a_ )
print("""CLIP loss""", a_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self: int, a_: Any, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=a_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_snake_case : Any = Image.open(a_ )
_snake_case : str = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(a_ ) )
def UpperCamelCase_ ( self: str, a_: List[Any] ):
'''simple docstring'''
if not prompts:
return []
_snake_case : List[str] = []
_snake_case : Tuple = []
if isinstance(a_, a_ ):
_snake_case : Union[str, Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(a_, (tuple, list) ):
_snake_case : List[Any] = prompt[0]
_snake_case : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
_snake_case , _snake_case : List[Any] = prompt.split(""":""" )
_snake_case : str = float(a_ )
else:
_snake_case : int = prompt
_snake_case : Union[str, Any] = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_, device=self.device ),
}
def UpperCamelCase_ ( self: Dict, a_: List[Any], a_: List[Any]=None, a_: Optional[Any]=None, a_: Optional[Any]=True, a_: Dict=False, a_: Optional[Any]=True, a_: Optional[Any]=True, a_: Any=None, ):
'''simple docstring'''
if image_path:
_snake_case : Union[str, Any] = self._get_latent(a_ )
else:
_snake_case : Any = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(a_, a_, a_ )
assert pos_prompts, "You must provide at least one positive prompt."
_snake_case : str = self.process_prompts(a_ )
_snake_case : Dict = self.process_prompts(a_ )
if save_final and save_path is None:
_snake_case : Any = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
_snake_case : List[Any] = save_path + """_""" + get_timestamp()
os.makedirs(a_ )
_snake_case : Optional[Any] = save_path
_snake_case : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(a_ ) )
_snake_case : List[Any] = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_, a_, a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 713 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_snake_case : Dict = 0
while number:
# Clearing the lowest set bit each pass (`number &= number - 1`) means the
# loop runs once per set bit instead of once per bit position, so it never
# needs all 32 iterations -- only as many as there are 1-bits.
number &= number - 1
count += 1
return count
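# Worked example (Kernighan's trick): for number = 0b1011, successive
# `number &= number - 1` steps yield 0b1010 -> 0b1000 -> 0b0000, so the
# loop runs exactly 3 times and the function returns 3.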
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class lowercase( UpperCamelCase__ ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = jnp.floataa
lowercase__ = True
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
super().setup()
_snake_case : Optional[Any] = nn.Dense(5, dtype=self.dtype )
def __call__( self: Union[str, Any], *a_: Optional[Any], **a_: Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = super().__call__(*__A, **__A )
_snake_case : Tuple = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class lowercase( UpperCamelCase__ ):
'''simple docstring'''
lowercase__ = FlaxBigBirdForNaturalQuestionsModule
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] ):
"""simple docstring"""
def cross_entropy(snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any]=None ):
_snake_case : str = logits.shape[-1]
_snake_case : Dict = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" )
_snake_case : Optional[int] = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 )
_snake_case : List[str] = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_snake_case : List[str] = reduction(UpperCamelCase__ )
return loss
_snake_case : Dict = partial(UpperCamelCase__ , reduction=jnp.mean )
_snake_case : List[Any] = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
_snake_case : Optional[int] = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
_snake_case : Optional[Any] = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = "google/bigbird-roberta-base"
lowercase__ = 30_00
lowercase__ = 1_05_00
lowercase__ = 1_28
lowercase__ = 3
lowercase__ = 1
lowercase__ = 5
# tx_args
lowercase__ = 3e-5
lowercase__ = 0.0
lowercase__ = 2_00_00
lowercase__ = 0.0095
lowercase__ = "bigbird-roberta-natural-questions"
lowercase__ = "training-expt"
lowercase__ = "data/nq-training.jsonl"
lowercase__ = "data/nq-validation.jsonl"
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
os.makedirs(self.base_dir, exist_ok=__A )
_snake_case : str = os.path.join(self.base_dir, self.save_dir )
_snake_case : List[str] = self.batch_size_per_device * jax.device_count()
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = 42
lowercase__ = 40_96 # no dynamic padding on TPUs
def __call__( self: str, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = self.collate_fn(__A )
_snake_case : Union[str, Any] = jax.tree_util.tree_map(__A, __A )
return batch
def UpperCamelCase_ ( self: List[str], a_: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.fetch_inputs(features["""input_ids"""] )
_snake_case : Optional[int] = {
"""input_ids""": jnp.array(__A, dtype=jnp.intaa ),
"""attention_mask""": jnp.array(__A, dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""], dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""], dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""], dtype=jnp.intaa ),
}
return batch
def UpperCamelCase_ ( self: List[Any], a_: list ):
'''simple docstring'''
_snake_case : str = [self._fetch_inputs(__A ) for ids in input_ids]
return zip(*__A )
def UpperCamelCase_ ( self: Union[str, Any], a_: list ):
'''simple docstring'''
_snake_case : Optional[Any] = [1 for _ in range(len(__A ) )]
while len(__A ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int=None ):
"""simple docstring"""
if seed is not None:
_snake_case : List[str] = dataset.shuffle(seed=UpperCamelCase__ )
for i in range(len(UpperCamelCase__ ) // batch_size ):
_snake_case : Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCamelCase__ )
@partial(jax.pmap , axis_name="""batch""" )
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int , **snake_case__ : Dict ):
"""simple docstring"""
def loss_fn(snake_case__ : Any ):
_snake_case : Optional[int] = model_inputs.pop("""start_labels""" )
_snake_case : Dict = model_inputs.pop("""end_labels""" )
_snake_case : Optional[int] = model_inputs.pop("""pooled_labels""" )
_snake_case : int = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ )
_snake_case , _snake_case , _snake_case : Optional[int] = outputs
return state.loss_fn(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
_snake_case , _snake_case : Dict = jax.random.split(UpperCamelCase__ )
_snake_case : Optional[int] = jax.value_and_grad(UpperCamelCase__ )
_snake_case , _snake_case : int = grad_fn(state.params )
_snake_case : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
_snake_case : Dict = jax.lax.pmean(UpperCamelCase__ , """batch""" )
_snake_case : Union[str, Any] = state.apply_gradients(grads=UpperCamelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def UpperCAmelCase__ (snake_case__ : Optional[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : int = model_inputs.pop("""start_labels""" )
_snake_case : List[Any] = model_inputs.pop("""end_labels""" )
_snake_case : Optional[Any] = model_inputs.pop("""pooled_labels""" )
_snake_case : Union[str, Any] = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ )
_snake_case , _snake_case , _snake_case : Optional[Any] = outputs
_snake_case : int = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_snake_case : str = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class lowercase( train_state.TrainState ):
'''simple docstring'''
lowercase__ = struct.field(pytree_node=UpperCamelCase__ )
@dataclass
class lowercase:
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = None
def UpperCamelCase_ ( self: Dict, a_: int, a_: str, a_: Optional[Any], a_: Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = model.params
_snake_case : str = TrainState.create(
apply_fn=model.__call__, params=__A, tx=__A, loss_fn=__A, )
if ckpt_dir is not None:
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : str = restore_checkpoint(__A, __A )
_snake_case : Tuple = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
_snake_case , _snake_case : Any = build_tx(**__A )
_snake_case : str = train_state.TrainState(
step=__A, apply_fn=model.__call__, params=__A, tx=__A, opt_state=__A, )
_snake_case : Any = args
_snake_case : Tuple = data_collator
_snake_case : Optional[int] = lr
_snake_case : Optional[int] = params
_snake_case : Dict = jax_utils.replicate(__A )
return state
def UpperCamelCase_ ( self: Union[str, Any], a_: Any, a_: Optional[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : str = self.args
_snake_case : List[str] = len(__A ) // args.batch_size
_snake_case : List[str] = jax.random.PRNGKey(0 )
_snake_case : List[str] = jax.random.split(__A, jax.device_count() )
for epoch in range(args.max_epochs ):
_snake_case : str = jnp.array(0, dtype=jnp.floataa )
_snake_case : List[Any] = get_batched_dataset(__A, args.batch_size, seed=__A )
_snake_case : Any = 0
for batch in tqdm(__A, total=__A, desc=f"Running EPOCH-{epoch}" ):
_snake_case : Any = self.data_collator(__A )
_snake_case , _snake_case , _snake_case : List[Any] = self.train_step_fn(__A, __A, **__A )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
_snake_case : Optional[Any] = jax_utils.unreplicate(state.step )
_snake_case : List[str] = running_loss.item() / i
_snake_case : Any = self.scheduler_fn(state_step - 1 )
_snake_case : Optional[Any] = self.evaluate(__A, __A )
_snake_case : List[Any] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(__A ) )
self.logger.log(__A, commit=__A )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=__A )
def UpperCamelCase_ ( self: str, a_: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
_snake_case : int = get_batched_dataset(__A, self.args.batch_size )
_snake_case : Dict = len(__A ) // self.args.batch_size
_snake_case : Tuple = jnp.array(0, dtype=jnp.floataa )
_snake_case : List[Any] = 0
for batch in tqdm(__A, total=__A, desc="""Evaluating ... """ ):
_snake_case : List[Any] = self.data_collator(__A )
_snake_case : int = self.val_step_fn(__A, **__A )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def UpperCamelCase_ ( self: int, a_: int, a_: Tuple ):
'''simple docstring'''
_snake_case : int = jax_utils.unreplicate(__A )
print(f"SAVING CHECKPOINT IN {save_dir}", end=""" ... """ )
self.model_save_fn(__A, params=state.params )
with open(os.path.join(__A, """opt_state.msgpack""" ), """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args, os.path.join(__A, """args.joblib""" ) )
joblib.dump(self.data_collator, os.path.join(__A, """data_collator.joblib""" ) )
with open(os.path.join(__A, """training_state.json""" ), """w""" ) as f:
json.dump({"""step""": state.step.item()}, __A )
print("""DONE""" )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : str ):
"""simple docstring"""
print(F"RESTORING CHECKPOINT FROM {save_dir}" , end=""" ... """ )
with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f:
_snake_case : str = from_bytes(state.params , f.read() )
with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f:
_snake_case : List[str] = from_bytes(state.opt_state , f.read() )
_snake_case : Optional[Any] = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) )
_snake_case : Optional[Any] = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) )
with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f:
_snake_case : List[str] = json.load(UpperCamelCase__ )
_snake_case : int = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Tuple = num_train_steps - warmup_steps
_snake_case : str = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ )
_snake_case : Optional[Any] = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ )
_snake_case : str = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
def weight_decay_mask(snake_case__ : Optional[Any] ):
_snake_case : List[Any] = traverse_util.flatten_dict(UpperCamelCase__ )
_snake_case : List[Any] = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCamelCase__ )
_snake_case : Any = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_snake_case : Union[str, Any] = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ )
return tx, lr
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base and backbone classes have no loss head, so there is nothing to train.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We verify our results on an image of two cats from the COCO test fixtures.
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
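# Usage sketch (not part of the test suite): the hidden-states test above relies on
# `output_hidden_states`; the same mechanism exposes per-stage feature maps outside the
# harness. The checkpoint is the one the integration test uses; the image path is the
# COCO test fixture and stands in for any RGB image.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2Model

if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
    model = ConvNextV2Model.from_pretrained("facebook/convnextv2-tiny-1k-224")

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)

    # One (batch, channels, height, width) tensor per stage, plus the embedding output.
    for i, feature_map in enumerate(outputs.hidden_states):
        print(i, tuple(feature_map.shape))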
"""Processor class for MGP-STR: wraps a ViT image processor and three tokenizers
(character, BPE and WordPiece) used to decode the model's three prediction heads."""

import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Decode the (char, bpe, wp) logits and keep, per sample, the head with the highest confidence."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # Greedy decoding: take the top-1 token at every position, dropping the BOS slot.
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence is the product of the per-step max probabilities up to EOS.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
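# Minimal end-to-end sketch of the processor above, assuming the published
# `alibaba-damo/mgp-str-base` checkpoint; the image path is a placeholder for a local
# crop of a single word. The model returns one logits tensor per head, which is
# exactly the (char, bpe, wp) triple that `batch_decode` fuses.
import torch
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

if __name__ == "__main__":
    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

    image = Image.open("word_crop.png").convert("RGB")  # placeholder path
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    decoded = processor.batch_decode(outputs.logits)
    print(decoded["generated_text"], decoded["scores"])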
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
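# Outside pytest, the reader/writer pair round-trips like this; a minimal sketch that
# mirrors the fixture shape used above (4 rows, 3 columns) and writes to the current
# directory.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

if __name__ == "__main__":
    dataset = Dataset.from_dict(
        {"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}
    )
    ParquetDatasetWriter(dataset, "data.parquet").write()

    reloaded = ParquetDatasetReader("data.parquet", split="train").read()
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]
    assert reloaded.num_rows == 4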
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase__ ():
"""simple docstring"""
raise RuntimeError("""CUDA out of memory.""" )
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Any = nn.Linear(3, 4 )
_snake_case : List[str] = nn.BatchNormad(4 )
_snake_case : Any = nn.Linear(4, 5 )
def UpperCamelCase_ ( self: int, a_: Optional[int] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(a_: Any ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a, [128, 64, 32, 16, 8] )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(a_: Dict, a_: Any ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_snake_case : List[str] = mock_training_loop_function("""hello""" )
self.assertListEqual(__a, [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga], [8, """hello"""] )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(a_: Optional[Any] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""", cm.exception.args[0] )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(a_: List[str] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""", cm.exception.args[0] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(a_: Tuple, a_: str, a_: int ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(128, """hello""", """world""" )
self.assertIn("""Batch size was passed into `f`""", cm.exception.args[0] )
self.assertIn("""`f(arg1=\'hello\', arg2=\'world\')""", cm.exception.args[0] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(a_: Any ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""", cm.exception.args[0] )
@require_cuda
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Any = torch.cuda.memory_allocated()
_snake_case : List[str] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated(), __a )
_snake_case : List[str] = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated(), __a )
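# The decorator exercised above is meant to wrap a real training loop; a minimal
# sketch follows (the loop body is elided and stands in for your own code).
from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # `batch_size` is injected by the decorator and halved after every CUDA
    # out-of-memory error, until the body runs to completion (or reaches zero).
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders and run the actual training loop here ...

if __name__ == "__main__":
    train()  # called without arguments; the decorator supplies batch_size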
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Union[str, Any], a_: Tuple=13, a_: Dict=32, a_: Optional[Any]=3, a_: Optional[Any]=4, a_: Optional[int]=[10, 20, 30, 40], a_: Any=[2, 2, 3, 2], a_: Dict=True, a_: Dict=True, a_: List[str]=37, a_: Dict="gelu", a_: List[str]=10, a_: Union[str, Any]=0.02, a_: Any=["stage2", "stage3", "stage4"], a_: Optional[int]=3, a_: Tuple=None, ):
'''simple docstring'''
_snake_case : Dict = parent
_snake_case : Dict = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : int = num_channels
_snake_case : Tuple = num_stages
_snake_case : int = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : Dict = use_labels
_snake_case : List[str] = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Any = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = num_labels
_snake_case : int = scope
_snake_case : Dict = num_stages
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=a_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=a_, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict, a_: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = UperNetForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) : List[Any] = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = UperNetModelTester(self )
_snake_case : Dict = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(a_ )
_snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Tuple = [*signature.parameters.keys()]
_snake_case : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a_ )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
def check_hidden_states_output(a_: Dict, a_: List[str], a_: Optional[int] ):
_snake_case : Optional[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
_snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : List[str] = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[int] = True
check_hidden_states_output(a_, a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(a_ )
_snake_case : Dict = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Union[str, Any] = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : List[Any] = Image.open(snake_case__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ )
_snake_case : Dict = prepare_img()
_snake_case : str = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Tuple = model(**a_ )
_snake_case : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : int = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ )
_snake_case : List[str] = prepare_img()
_snake_case : Tuple = processor(images=a_, return_tensors="""pt""" ).to(a_ )
with torch.no_grad():
_snake_case : Optional[Any] = model(**a_ )
_snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, a_ )
_snake_case : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], a_, atol=1E-4 ) )
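# Beyond checking raw logits as the tests do, the resolved image processor can collapse
# them into a per-pixel class map. A sketch, assuming the same
# `openmmlab/upernet-convnext-tiny` checkpoint and ADE20k fixture as above;
# `post_process_semantic_segmentation` is provided by the image processor class that
# `AutoImageProcessor` resolves to for this checkpoint.
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Resize the per-class logits back to the image size and take a per-pixel argmax.
    segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(segmentation.shape)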