from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
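# Minimal usage sketch (attribute names and defaults follow the reconstruction above;
# this is illustrative and not part of the original module):
#
#   config = RoFormerConfig(vocab_size=50_000, hidden_size=768)
#   config.max_position_embeddings  # -> 1536 (default)
#   config.rotary_value             # -> False (default)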
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for a, b in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Union[str, Any] = None
ops.enable_eager_execution_internal()
A : Tuple = tf.config.list_physical_devices("CPU" )
if len(__lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
A : Dict = tf.config.list_logical_devices(device_type="CPU" )
A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
A : Optional[int] = GradientAccumulator()
A : Tuple = tf.Variable([4.0, 3.0] )
A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 )
A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase )
def accumulate_on_replica(__lowerCamelCase : Tuple ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
with strategy.scope():
A : int = strategy.experimental_local_results(__lowerCamelCase )
local_variables[0].assign(__lowerCamelCase )
local_variables[1].assign(__lowerCamelCase )
strategy.run(__lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowerCamelCase )
def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
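# A minimal sketch of the accumulate-then-apply pattern exercised by the tests above,
# assuming only the GradientAccumulator API they use (call with a list of gradients,
# read .gradients, then .reset()); `model` and `gradient_batches` are illustrative
# placeholders, not names from the original test:
#
#   accumulator = GradientAccumulator()
#   for grads in gradient_batches:
#       accumulator(grads)
#   optimizer.apply_gradients(list(zip(accumulator.gradients, model.trainable_variables)))
#   accumulator.reset()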
def sum_of_divisors(input_num: int) -> int:
    # Sum of the proper divisors of `input_num` (every divisor up to input_num // 2).
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
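# Illustrative values for the function above (the descriptive name follows the cleanup
# above; these are plain comments, not doctests):
#
#   sum_of_divisors(6)  -> 1 + 2 + 3 = 6          (6 is a perfect number)
#   sum_of_divisors(12) -> 1 + 2 + 3 + 4 + 6 = 16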
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
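# With the lazy-module pattern above, importing a symbol such as AltCLIPModel from this
# package only triggers the heavy torch-dependent import on first attribute access
# (a description of the intended _LazyModule behaviour, not additional code from the
# original file).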
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ) -> int:
return F"""gaussian_noise_s={seed}_shape={"_".join([str(A_ ) for s in shape] )}.npy"""
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Dict=(4, 4, 64, 64) , __lowerCamelCase : Dict=False ) -> Optional[Any]:
A : List[str] = jnp.bfloataa if fpaa else jnp.floataa
A : int = jnp.array(load_hf_numpy(self.get_file_format(A_ , A_ ) ) , dtype=A_ )
return image
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int="CompVis/stable-diffusion-v1-4" ) -> int:
A : List[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = "bf16" if fpaa else None
A , A : int = FlaxUNetaDConditionModel.from_pretrained(
A_ , subfolder="unet" , dtype=A_ , revision=A_ )
return model, params
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Dict=(4, 77, 7_68) , __lowerCamelCase : Union[str, Any]=False ) -> Any:
A : Any = jnp.bfloataa if fpaa else jnp.floataa
A : Dict = jnp.array(load_hf_numpy(self.get_file_format(A_ , A_ ) ) , dtype=A_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> Any:
A , A : Tuple = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=A_ )
A : Tuple = self.get_latents(A_ , fpaa=A_ )
A : Optional[int] = self.get_encoder_hidden_states(A_ , fpaa=A_ )
A : List[Any] = model.apply(
{"params": params} , A_ , jnp.array(A_ , dtype=jnp.intaa ) , encoder_hidden_states=A_ , ).sample
assert sample.shape == latents.shape
A : Tuple = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A : str = jnp.array(A_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(A_ , A_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int ) -> Any:
A , A : str = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=A_ )
A : Optional[int] = self.get_latents(A_ , shape=(4, 4, 96, 96) , fpaa=A_ )
A : List[Any] = self.get_encoder_hidden_states(A_ , shape=(4, 77, 10_24) , fpaa=A_ )
A : Dict = model.apply(
{"params": params} , A_ , jnp.array(A_ , dtype=jnp.intaa ) , encoder_hidden_states=A_ , ).sample
assert sample.shape == latents.shape
A : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A : str = jnp.array(A_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(A_ , A_ , atol=1e-2 )
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__SCREAMING_SNAKE_CASE = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase ( ):
A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json"
A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys()
return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) )
def UpperCAmelCase ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
A : List[Any] = Path(_lowerCamelCase ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( _lowerCamelCase ):
init_hf_modules()
A : Tuple = Path(_lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
A : Optional[int] = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Union[str, Any] = f.read()
# Imports of the form `import .xxx`
A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(_lowerCamelCase ) )
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[int] = False
A : Tuple = [module_file]
A : Optional[int] = []
# Let's recurse through all relative imports
while not no_change:
A : Optional[Any] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowerCamelCase ) )
A : Optional[Any] = Path(_lowerCamelCase ).parent
A : List[str] = [str(module_path / m ) for m in new_imports]
A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports]
A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files]
A : Tuple = len(_lowerCamelCase ) == 0
all_relative_imports.extend(_lowerCamelCase )
return all_relative_imports
def UpperCAmelCase ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Dict = f.read()
# Imports of the form `import xxx`
A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
A : Any = list(set(_lowerCamelCase ) )
A : Tuple = []
for imp in imports:
try:
importlib.import_module(_lowerCamelCase )
except ImportError:
missing_packages.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" )
return get_relative_imports(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : int = module_path.replace(os.path.sep , "." )
A : Optional[Any] = importlib.import_module(_lowerCamelCase )
if class_name is None:
return find_pipeline_class(_lowerCamelCase )
return getattr(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase ):
from ..pipelines import DiffusionPipeline
A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) )
A : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowerCamelCase )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
A : Any = cls
return pipeline_class
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ):
A : List[Any] = str(_lowerCamelCase )
A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
A : Union[str, Any] = module_file_or_url
A : Any = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
A : Optional[Any] = get_diffusers_versions()
# cut ".dev0"
A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
A : List[Any] = latest_version if latest_version[1:] in available_versions else "main"
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
A : Optional[Any] = f"""v{revision}"""
elif revision == "main":
A : Dict = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
A : Optional[int] = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = "git"
A : Any = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
A : Any = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
A : List[str] = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
A : Optional[int] = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
A : int = f"""{module_needed}.py"""
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = use_auth_token
elif use_auth_token is True:
A : Dict = HfFolder.get_token()
else:
A : Tuple = None
A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A : str = submodule_path / commit_hash
A : List[str] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
A : int = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
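# Illustrative call pattern for the final helper above (assuming it corresponds to
# diffusers' get_class_from_dynamic_module; the repo id and file name below are
# placeholders, not values from the original file):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-user/some-community-pipeline",  # placeholder repo id or local path
#       "pipeline.py",                        # module file inside that repo
#   )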
import qiskit


def half_adder(bit_a: int, bit_b: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit_a == 1:
        qc_ha.x(0)
    if bit_b == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
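# For bit_a = bit_b = 1 the XOR (sum) qubit measures 0 and the AND (carry) qubit
# measures 1, so the returned counts should be concentrated on the bitstring "10"
# (classical bit 1 = carry, classical bit 0 = sum), e.g. roughly {"10": 1000}.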
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F"""Half Adder Output Qubit Counts: {counts}""")
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict:
A : str = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
if not do_eval:
return
A : List[Any] = [log for log in logs if "eval_loss" in log.keys()]
A : Any = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A : List[str] = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A : Dict = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
A : List[str] = experiments[experiment_id]
A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
A : Union[str, Any] = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] )
A : Dict = len(re.findall(__lowerCamelCase , cl.err ) )
self.assertEqual(__lowerCamelCase , data["n_matches"] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : int = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , )
# Check metrics
A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : Dict = [log for log in logs if "eval_loss" in log.keys()]
A : Dict = eval_metrics[0]
A : int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
# test if do_predict saves generations and metrics
A : Optional[Any] = os.listdir(__lowerCamelCase )
A : Any = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]:
A : Optional[int] = "--skip_memory_metrics 0"
A : str = self.run_trainer(
max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
A : int = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig
A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A : int = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A : Tuple = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]:
A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
A : Optional[int] = self.get_auto_remove_tmp_dir()
A : int = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
A : Optional[Any] = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCamelCase )}
""".split()
A : Optional[Any] = "\n --do_predict\n ".split()
A : Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A : Dict = get_gpu_count()
A : Any = get_torch_dist_unique_port()
A : Optional[Any] = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
A : Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
else:
A : List[Any] = ["run_translation.py"] + args
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
main()
return output_dir
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger("""transformers.models.encodec""")
__SCREAMING_SNAKE_CASE = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__SCREAMING_SNAKE_CASE = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__SCREAMING_SNAKE_CASE = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__SCREAMING_SNAKE_CASE = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__SCREAMING_SNAKE_CASE = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__SCREAMING_SNAKE_CASE = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__SCREAMING_SNAKE_CASE = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for attribute in key.split("." ):
A : Optional[Any] = getattr(_A , _A )
if weight_type is not None:
A : List[str] = getattr(_A , _A ).shape
else:
A : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
A : Dict = value
elif weight_type == "weight_g":
A : str = value
elif weight_type == "weight_v":
A : Optional[int] = value
elif weight_type == "bias":
A : Tuple = value
elif weight_type == "running_mean":
A : List[Any] = value
elif weight_type == "running_var":
A : str = value
elif weight_type == "num_batches_tracked":
A : List[Any] = value
elif weight_type == "weight_ih_l0":
A : Optional[Any] = value
elif weight_type == "weight_hh_l0":
A : Tuple = value
elif weight_type == "bias_ih_l0":
A : str = value
elif weight_type == "bias_hh_l0":
A : Optional[int] = value
elif weight_type == "weight_ih_l1":
A : Dict = value
elif weight_type == "weight_hh_l1":
A : str = value
elif weight_type == "bias_ih_l1":
A : Optional[Any] = value
elif weight_type == "bias_hh_l1":
A : int = value
else:
A : Optional[Any] = value
logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A : Any = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Dict = []
if model_name == "encodec_24khz" or "encodec_32khz":
A : str = MAPPING_24K
elif model_name == "encodec_48khz":
A : Any = MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(_A , _A ):
logger.info(f"""{name} was ignored""" )
continue
A : int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A : Any = key.split(".*." )
if prefix in name and suffix in name:
A : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
A : Optional[Any] = True
if "*" in mapped_key:
A : Optional[Any] = name.split(_A )[0].split("." )[-2]
A : str = mapped_key.replace("*" , _A )
if "weight_g" in name:
A : Any = "weight_g"
elif "weight_v" in name:
A : List[Any] = "weight_v"
elif "weight_ih_l0" in name:
A : Union[str, Any] = "weight_ih_l0"
elif "weight_hh_l0" in name:
A : Dict = "weight_hh_l0"
elif "bias_ih_l0" in name:
A : Optional[Any] = "bias_ih_l0"
elif "bias_hh_l0" in name:
A : Optional[int] = "bias_hh_l0"
elif "weight_ih_l1" in name:
A : Dict = "weight_ih_l1"
elif "weight_hh_l1" in name:
A : Optional[Any] = "weight_hh_l1"
elif "bias_ih_l1" in name:
A : List[str] = "bias_ih_l1"
elif "bias_hh_l1" in name:
A : List[Any] = "bias_hh_l1"
elif "bias" in name:
A : str = "bias"
elif "weight" in name:
A : List[Any] = "weight"
elif "running_mean" in name:
A : Any = "running_mean"
elif "running_var" in name:
A : List[Any] = "running_var"
elif "num_batches_tracked" in name:
A : str = "num_batches_tracked"
else:
A : Optional[int] = None
set_recursively(_A , _A , _A , _A , _A )
continue
if not is_used:
unused_weights.append(_A )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , ):
if config_path is not None:
A : str = EncodecConfig.from_pretrained(_A )
else:
A : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A : Union[str, Any] = [8, 5, 4, 4]
A : Tuple = [2.2]
A : int = 64
A : List[Any] = 3_2000
A : Optional[int] = 2048
A : Optional[Any] = False
A : int = False
A : int = False
elif model_name == "encodec_48khz":
A : Any = [8, 5, 4, 2]
A : str = [3.0, 6.0, 12.0, 24.0]
A : Dict = 4_8000
A : Optional[int] = 2
A : Union[str, Any] = False
A : str = "time_group_norm"
A : int = True
A : Optional[Any] = 1.0
A : Union[str, Any] = 0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
A : Dict = EncodecModel(_A )
A : Dict = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_A )
A : Dict = torch.load(_A )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A : List[str] = original_checkpoint["best_state"]
recursively_load_weights(_A , _A , _A )
model.save_pretrained(_A )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(_A )
model.push_to_hub(_A )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
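# Example invocation of this conversion script (a sketch: the script filename and the
# local paths are placeholders, while the flag names come from the argparse setup above):
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf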
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
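# Illustrative value (this routine allows skipping elements, i.e. it returns the best
# non-contiguous subsequence sum, which is the sum of all positive entries when any exist):
#
#   max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> 12  (= 1 + 4 + 2 + 1 + 4)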
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("""Enter number of elements : """).strip())
    array = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
    print(max_subsequence_sum(array))
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
A : Optional[int] = 'ZinengTang/tvlt-base'
A : Optional[int] = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> str:
return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : str , **__lowerCamelCase : int ) -> Dict:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
A : Dict = self.get_image_processor()
A : Union[str, Any] = self.get_feature_extractor()
A : Any = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
A : Union[str, Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
A : Optional[int] = self.get_image_processor()
A : Tuple = self.get_feature_extractor()
A : Union[str, Any] = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
A : List[str] = np.ones([1_20_00] )
A : Optional[Any] = feature_extractor(UpperCAmelCase_ , return_tensors="np" )
A : Optional[Any] = processor(audio=UpperCAmelCase_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
A : int = self.get_image_processor()
A : Optional[int] = self.get_feature_extractor()
A : Union[str, Any] = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
A : Dict = np.ones([3, 2_24, 2_24] )
A : str = image_processor(UpperCAmelCase_ , return_tensors="np" )
A : Any = processor(images=UpperCAmelCase_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
A : List[Any] = self.get_image_processor()
A : List[Any] = self.get_feature_extractor()
A : Tuple = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
A : int = np.ones([1_20_00] )
A : Optional[Any] = np.ones([3, 2_24, 2_24] )
A : str = processor(audio=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : Tuple = self.get_image_processor()
A : Dict = self.get_feature_extractor()
A : Dict = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
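# Counting note: for the largest side max_cuboid_size and a fixed a + b = sum_shortest_sides,
# the update above counts every cuboid whose shortest surface path is an integer. With the
# default limit of 1_000_000 the expected return value is 1818, the commonly cited answer
# to Project Euler Problem 86.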
if __name__ == "__main__":
    print(F"""{solution() = }""")
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
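# Illustrative value (standard Kadane behaviour, contiguous subarrays only):
#
#   max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> 6, from the subarray [4, -1, 2, 1]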
if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F"""{max_subarray_sum(nums) = }""")
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__SCREAMING_SNAKE_CASE = """."""
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
with open(doctest_file_path) as fp:
for line in fp:
__SCREAMING_SNAKE_CASE = line.strip()
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""") | 17 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " ,__A ,)
class lowerCamelCase_ ( __A ):
'''simple docstring'''
a__ = RobertaConfig
a__ = "roberta"
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str] ) -> Optional[int]:
super().__init__(__lowerCamelCase )
A : List[str] = RobertaEmbeddings(__lowerCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " ,__A ,)
class lowerCamelCase_ ( __A ):
'''simple docstring'''
a__ = RobertaConfig
a__ = "roberta"
def __init__( self : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
super().__init__(__lowerCamelCase )
A : Optional[int] = config.num_labels
A : List[Any] = config.num_hidden_layers
A : Any = DeeRobertaModel(__lowerCamelCase )
A : str = nn.Dropout(config.hidden_dropout_prob )
A : int = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : int=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=None , __lowerCamelCase : Any=-1 , __lowerCamelCase : Union[str, Any]=False , ) -> Dict:
A : List[str] = self.num_layers
try:
A : Tuple = self.roberta(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , position_ids=__lowerCamelCase , head_mask=__lowerCamelCase , inputs_embeds=__lowerCamelCase , )
A : Optional[Any] = outputs[1]
A : Any = self.dropout(__lowerCamelCase )
A : int = self.classifier(__lowerCamelCase )
A : int = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A : List[str] = e.message
A : Optional[Any] = e.exit_layer
A : Any = outputs[0]
if not self.training:
A : List[str] = entropy(__lowerCamelCase )
A : int = []
A : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A : Dict = MSELoss()
A : Any = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A : str = CrossEntropyLoss()
A : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
A : List[Any] = []
for highway_exit in outputs[-1]:
A : Any = highway_exit[0]
if not self.training:
highway_logits_all.append(__lowerCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A : Union[str, Any] = MSELoss()
A : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A : Tuple = CrossEntropyLoss()
A : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__lowerCamelCase )
if train_highway:
A : int = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A : Any = (loss,) + outputs
if not self.training:
A : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy | 708 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str:
A : List[Any] = parent
A : Optional[int] = batch_size
A : Any = image_size
A : Optional[Any] = patch_size
A : Optional[Any] = num_channels
A : Tuple = is_training
A : Optional[Any] = use_labels
A : Union[str, Any] = hidden_size
A : Tuple = num_hidden_layers
A : Union[str, Any] = num_attention_heads
A : Union[str, Any] = intermediate_size
A : Any = hidden_act
A : Tuple = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Tuple = initializer_range
A : List[Any] = scope
A : Optional[int] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A : List[str] = (image_size // patch_size) ** 2
A : List[str] = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[Any] = None
if self.use_labels:
A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int:
A : Optional[int] = DeiTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any:
A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : List[str] = 1
A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict:
A : str = self.type_sequence_label_size
A : List[str] = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Any = 1
A : str = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
a__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
A : str = DeiTModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Any = [*signature.parameters.keys()]
A : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str:
A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
if not self.model_tester.is_training:
return
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A : Union[str, Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Dict = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A : Tuple = False
A : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A : List[str] = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
A : Tuple = problem_type["title"]
A : Optional[Any] = problem_type["num_labels"]
A : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
A : int = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
A : Optional[Any] = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : List[str] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : str = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
A : Dict = self.default_image_processor
A : Optional[int] = prepare_img()
A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" )
A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A : List[str] = model(__lowerCamelCase ) | 17 | 0 |
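# --- Added usage sketch (not part of the test file above): a minimal inference
# example for the distilled DeiT checkpoint exercised by the slow tests,
# assuming the public `transformers` API. The checkpoint name and the expected
# logits shape come from the integration test above.
#
#     from PIL import Image
#     from transformers import DeiTImageProcessor, DeiTForImageClassificationWithTeacher
#
#     processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 1000), ImageNet-1k classes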
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCamelCase_ ( Pipeline ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
super().__init__(**_lowerCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , __lowerCamelCase : List[Any] , **__lowerCamelCase : int ) -> Tuple:
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : int ) -> List[str]:
A : str = {}
if "candidate_labels" in kwargs:
A : Dict = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
A : int = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]="This is a photo of {}." ) -> Dict:
A : Optional[Any] = load_image(_lowerCAmelCase )
A : Optional[Any] = self.image_processor(images=[image] , return_tensors=self.framework )
A : Union[str, Any] = candidate_labels
A : List[str] = [hypothesis_template.format(_lowerCAmelCase ) for x in candidate_labels]
A : Any = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework , padding=_lowerCAmelCase )
A : List[Any] = [text_inputs]
return inputs
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[int] ) -> Tuple:
A : Union[str, Any] = model_inputs.pop("candidate_labels" )
A : List[str] = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , _lowerCAmelCase ):
A : Dict = text_inputs[0]
else:
# Batching case.
A : Any = text_inputs[0][0]
A : Any = self.model(**_lowerCAmelCase , **_lowerCAmelCase )
A : Optional[Any] = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : List[Any] ) -> List[Any]:
A : Union[str, Any] = model_outputs.pop("candidate_labels" )
A : Any = model_outputs["logits"][0]
if self.framework == "pt":
A : List[str] = logits.softmax(dim=-1 ).squeeze(-1 )
A : Any = probs.tolist()
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = [scores]
elif self.framework == "tf":
A : int = stable_softmax(_lowerCAmelCase , axis=-1 )
A : Tuple = probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
A : Tuple = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(_lowerCAmelCase , _lowerCAmelCase ) , key=lambda x : -x[0] )
]
return result | 709 |
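# --- Added usage sketch (not part of the pipeline file above): how a zero-shot
# image classification pipeline is typically invoked, assuming the public
# `transformers.pipeline` factory and an OpenAI CLIP checkpoint.
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     result = classifier(image, candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")
#     # result is a list of {"score": ..., "label": ...} dicts sorted by score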
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__SCREAMING_SNAKE_CASE = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__SCREAMING_SNAKE_CASE = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]:
A : str = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
return {"recall": float(__lowerCamelCase ) if score.size == 1 else score} | 17 | 0 |
def valid_coloring( neighbours , colored_vertices , color ):
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph , max_colors , colored_vertices , index ):
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph , max_colors ):
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
return colored_vertices
return [] | 710 |
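# --- Added usage sketch (illustration only, not part of the original snippet):
# color() returns one valid assignment of color indices, or [] when no
# coloring with `max_colors` colors exists.
if __name__ == "__main__":
    # Adjacency matrix of a 4-cycle: each vertex touches its two neighbours.
    cycle_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle_graph, 2))  # a 4-cycle is 2-colorable, e.g. [0, 1, 0, 1]
    print(color(cycle_graph, 1))  # impossible with one color -> []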
from collections import deque
from .hash_table import HashTable
class lowerCamelCase_ ( HashTable ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
A : Dict = self.values[key]
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase ) | 17 | 0 |
def solution( n = 6008_5147_5143 ):
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(F"""{solution() = }""") | 711 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict:
A : Optional[Any] = 4
A : List[str] = 32
A : Any = (32, 32)
A : str = torch.manual_seed(0 )
A : int = torch.device(__lowerCamelCase )
A : List[str] = (batch_size, num_channels) + sizes
A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase )
A : int = {"hidden_states": hidden_states}
if include_temb:
A : Any = 1_28
A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase )
if include_res_hidden_states_tuple:
A : str = torch.manual_seed(1 )
A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),)
if include_encoder_hidden_states:
A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase )
if include_skip_sample:
A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : Dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 1_28,
}
if self.block_type == "up":
A : Dict = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A : str = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A , A : str = self.prepare_init_args_and_inputs_for_common()
A : List[Any] = self.block_class(**__lowerCamelCase )
unet_block.to(__lowerCamelCase )
unet_block.eval()
with torch.no_grad():
A : int = unet_block(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Union[str, Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
A : Any = output[0, -1, -3:, -3:]
A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase )
assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.prepare_init_args_and_inputs_for_common()
A : str = self.block_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Optional[int] = model(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Optional[Any] = output[0]
A : List[str] = torch.device(__lowerCamelCase )
A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase )
A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase )
loss.backward() | 17 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCamelCase_ ( PretrainedConfig ):
'''simple docstring'''
a__ = """decision_transformer"""
a__ = ["""past_key_values"""]
a__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , __lowerCamelCase : Dict=17 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Dict=1_28 , __lowerCamelCase : List[Any]=40_96 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : Any=10_24 , __lowerCamelCase : Any=3 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Dict=1e-5 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : str=5_02_56 , __lowerCamelCase : Tuple=5_02_56 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Any=False , **__lowerCamelCase : Optional[int] , ) -> List[str]:
A : Optional[Any] = state_dim
A : Dict = act_dim
A : Dict = hidden_size
A : Union[str, Any] = max_ep_len
A : Tuple = action_tanh
A : List[str] = vocab_size
A : Optional[Any] = n_positions
A : Optional[int] = n_layer
A : str = n_head
A : Union[str, Any] = n_inner
A : List[Any] = activation_function
A : Optional[Any] = resid_pdrop
A : List[str] = embd_pdrop
A : Union[str, Any] = attn_pdrop
A : Tuple = layer_norm_epsilon
A : str = initializer_range
A : Optional[Any] = scale_attn_weights
A : Tuple = use_cache
A : Union[str, Any] = scale_attn_by_inverse_layer_idx
A : int = reorder_and_upcast_attn
A : Tuple = bos_token_id
A : Dict = eos_token_id
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
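# --- Added usage sketch (illustration only; assumes the public `transformers`
# classes DecisionTransformerConfig / DecisionTransformerModel):
#
#     from transformers import DecisionTransformerConfig, DecisionTransformerModel
#
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#     model = DecisionTransformerModel(config)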
| 712 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class lowerCamelCase_ ( _A ):
'''simple docstring'''
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]:
A : str = max_length
A : Optional[int] = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool:
A : List[Any] = input_ids.shape[-1]
A : Any = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]:
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , __lowerCamelCase , )
A : str = start_length
A : Optional[Any] = max_new_tokens
A : Dict = start_length + max_new_tokens
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return input_ids.shape[-1] >= self.max_length
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]:
A : str = max_time
A : Dict = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase_ ( _A ):
'''simple docstring'''
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool:
return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
return None
def validate_stopping_criteria( stopping_criteria , max_length ):
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria | 17 | 0 |
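# --- Added usage sketch (illustration only; assumes the standard `transformers`
# names StoppingCriteriaList / MaxLengthCriteria rather than the placeholder
# class names used above):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteriaList, MaxLengthCriteria
#
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tokenizer("Hello, my name is", return_tensors="pt")
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     output_ids = model.generate(**inputs, stopping_criteria=criteria)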
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A : List[str] = [144, 192, 240]
A : Any = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A : str = [96, 120, 144]
A : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A : Optional[Any] = [64, 80, 96]
A : Tuple = [16, 16, 24, 48, 64, 80, 320]
A : List[str] = 0.05
A : List[str] = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
A : str = 512
A : Tuple = 16
A : str = 21
A : int = "pascal-voc-id2label.json"
else:
A : str = 1000
A : Optional[Any] = "imagenet-1k-id2label.json"
A : Dict = "huggingface/label-files"
A : List[Any] = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="dataset" ) , "r" ) )
A : List[Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
A : Union[str, Any] = idalabel
A : Tuple = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False ):
for i in range(1 , 6 ):
if f"""layer_{i}.""" in name:
A : List[Any] = name.replace(f"""layer_{i}.""" , f"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
A : int = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
A : str = name.replace(".block." , "." )
if "exp_1x1" in name:
A : List[str] = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
A : Optional[Any] = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
A : List[Any] = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
A : int = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
A : Any = name.replace(".norm." , ".normalization." )
if ".conv." in name:
A : Optional[int] = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
A : List[Any] = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
A : Optional[int] = name.replace(f""".{i}.{j}.""" , f""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
A : List[str] = name.replace(f""".{i}.{j}.""" , f""".{i}.""" )
if "expand_1x1" in name:
A : int = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
A : Any = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
A : Optional[int] = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if f""".global_rep.{i}.weight""" in name:
A : Optional[Any] = name.replace(f""".global_rep.{i}.weight""" , ".layernorm.weight" )
if f""".global_rep.{i}.bias""" in name:
A : str = name.replace(f""".global_rep.{i}.bias""" , ".layernorm.bias" )
if ".global_rep." in name:
A : str = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
A : int = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
A : Union[str, Any] = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
A : Optional[Any] = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
A : Dict = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
A : Union[str, Any] = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
A : Any = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
A : Optional[Any] = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
A : List[Any] = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
A : List[str] = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
A : Union[str, Any] = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
A : List[Any] = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
A : Union[str, Any] = "mobilevit." + name
return name
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
if base_model:
A : Optional[Any] = ""
else:
A : Any = "mobilevit."
for key in orig_state_dict.copy().keys():
A : Tuple = orig_state_dict.pop(__UpperCAmelCase )
if key[:8] == "encoder.":
A : str = key[8:]
if "qkv" in key:
A : Optional[Any] = key.split("." )
A : List[Any] = int(key_split[0][6:] ) - 1
A : Optional[int] = int(key_split[3] )
A : Dict = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""" )
A : int = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A : str = (
f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
A : int = val[:dim, :]
A : Optional[Any] = val[dim : dim * 2, :]
A : Union[str, Any] = val[-dim:, :]
else:
A : Tuple = val[:dim]
A : List[str] = val[dim : dim * 2]
A : Any = val[-dim:]
else:
A : Tuple = val
return orig_state_dict
def UpperCAmelCase ( ):
A : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
A : Dict = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
A : Optional[int] = get_mobilevit_config(__UpperCAmelCase )
# load original state_dict
A : Optional[int] = torch.load(__UpperCAmelCase , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
A : Any = MobileViTForSemanticSegmentation(__UpperCAmelCase ).eval()
else:
A : List[Any] = MobileViTForImageClassification(__UpperCAmelCase ).eval()
A : Union[str, Any] = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
A : Tuple = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
A : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
A : Any = model(**__UpperCAmelCase )
A : Tuple = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A : List[str] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A : List[str] = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A : List[Any] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.86_24, -9.59_64], [-10.8840, -10.8158, -10.6659]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A : Union[str, Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
A : Union[str, Any] = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
A : Union[str, Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
A : Dict = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
A : Optional[Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(__UpperCAmelCase , organization="apple" )
model.push_to_hub(__UpperCAmelCase , organization="apple" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 713 |
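# --- Added CLI sketch (illustration only): how the conversion script above is
# typically invoked; the script file name is assumed, while the flags come from
# the argparse definition above.
#
#     python convert_mobilevit_original_to_pytorch.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit-small \
#         --push_to_hub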
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ):
A : str = symbols(_lowerCamelCase )
A : int = lambdify(_lowerCamelCase , _lowerCamelCase )
A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) )
A : Optional[int] = starting_point
while True:
if diff_function(_lowerCamelCase ) != 0:
A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function(
_lowerCamelCase )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
A : int = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""") | 17 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase_ ( ProcessorMixin ):
'''simple docstring'''
a__ = ["image_processor", "tokenizer"]
a__ = "LayoutLMv2ImageProcessor"
a__ = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Any , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Optional[int] ) -> Any:
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
A : Optional[Any] = kwargs.pop("feature_extractor" )
A : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __lowerCamelCase : Union[List[List[int]], List[List[List[int]]]] = None , __lowerCamelCase : Optional[Union[List[int], List[List[int]]]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : Any , ) -> BatchEncoding:
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
A : Optional[Any] = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
A : Dict = [text] # add batch dimension (as the image processor always adds a batch dimension)
A : Optional[int] = features["words"]
A : Any = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
A : List[str] = features.pop("pixel_values" )
if return_overflowing_tokens is True:
A : List[str] = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
A : List[Any] = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : int ) -> Optional[int]:
A : Optional[int] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self : str , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> Any:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor | 714 |
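# --- Added usage sketch (illustration only; assumes the public `transformers`
# LayoutXLMProcessor API and a document image opened with PIL):
#
#     from transformers import LayoutXLMProcessor
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(image, return_tensors="pt")  # OCR is applied by the image processor
#     # encoding contains input_ids, bbox, attention_mask and image tensors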
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16384,
}
class lowerCamelCase_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = LEDTokenizer
a__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]:
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
A : Any = add_prefix_space
A : Tuple = pre_tok_class(**__lowerCamelCase )
A : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A : List[str] = "post_processor"
A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
A : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A : Union[str, Any] = tuple(state["sep"] )
if "cls" in state:
A : str = tuple(state["cls"] )
A : int = False
if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : List[Any] = add_prefix_space
A : Dict = True
if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets:
A : Dict = trim_offsets
A : str = True
if changes_to_apply:
A : int = getattr(__lowerCamelCase , state.pop("type" ) )
A : Dict = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict:
A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value
A : Tuple = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]:
A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
A : str = [self.sep_token_id]
A : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict:
A : Dict = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
A : List[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A : Optional[int] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
A : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs | 17 | 0 |
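# A minimal, self-contained sketch of the padding rule implemented in the override
# above (which extends `super()._pad`): `global_attention_mask` is extended with -1
# on the configured side so it keeps the same length as the padded inputs. The helper
# name and sample values below are illustrative only, not part of the original module.
def pad_global_attention_mask(global_attention_mask , target_length , padding_side="right" ):
    difference = target_length - len(global_attention_mask )
    if difference <= 0:
        return list(global_attention_mask )
    if padding_side == "right":
        # -1 means "local attention", as opposed to "do not attend"
        return list(global_attention_mask ) + [-1] * difference
    return [-1] * difference + list(global_attention_mask )


# Example: a mask marking global attention on the first token, padded to length 6.
assert pad_global_attention_mask([1, 0, 0, 0] , 6 ) == [1, 0, 0, 0, -1, -1]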
'''simple docstring'''
def UpperCAmelCase ( a , b ):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 715 |
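# A short usage sketch of the XOR helper above (values chosen so the result is easy
# to check by hand: 25 = 0b11001 and 32 = 0b100000). Assumes the function as
# reconstructed above.
print(UpperCAmelCase(25 , 32 ))  # -> 0b111001
print(UpperCAmelCase(37 , 50 ))  # -> 0b010111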
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class lowerCamelCase_ ( TaskTemplate ):
'''simple docstring'''
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} )
a__ = Features({"question": Value("string" ), "context": Value("string" )} )
a__ = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
a__ = "question"
a__ = "context"
a__ = "answers"
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"} | 17 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__SCREAMING_SNAKE_CASE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Tuple=7 , __lowerCamelCase : List[str]=14 , __lowerCamelCase : str=10 , __lowerCamelCase : Dict=19 , __lowerCamelCase : int=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : int=True , __lowerCamelCase : str=16 , __lowerCamelCase : Any=2 , __lowerCamelCase : int=4 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=[1, 2, 3, 4, 5] , __lowerCamelCase : Tuple=25 , __lowerCamelCase : Optional[Any]=5 , ) -> List[Any]:
A : Optional[Any] = d_model
A : int = parent
A : str = batch_size
A : Union[str, Any] = prediction_length
A : Union[str, Any] = context_length
A : List[Any] = cardinality
A : int = num_time_features
A : Tuple = lags_sequence
A : Union[str, Any] = embedding_dimension
A : Optional[Any] = is_training
A : Optional[Any] = hidden_size
A : Tuple = num_hidden_layers
A : Any = num_attention_heads
A : Union[str, Any] = intermediate_size
A : int = hidden_act
A : int = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : str = context_length
A : Any = prediction_length + label_length
A : Any = label_length
A : Tuple = moving_average
A : Tuple = autocorrelation_factor
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Any ) -> List[str]:
A : List[Any] = config.context_length + max(config.lags_sequence )
A : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
A : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
A : Tuple = floats_tensor([self.batch_size, _past_length] )
A : str = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
A : int = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
A : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
A : Optional[Any] = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
A : Optional[int] = self.get_config()
A : Dict = self.prepare_autoformer_inputs_dict(_A )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
A : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ) -> str:
A : Optional[int] = AutoformerModel(config=_A ).to(_A ).eval()
A : Tuple = model(**_A )
A : Any = outputs.encoder_last_hidden_state
A : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A : Union[str, Any] = model.get_encoder()
encoder.save_pretrained(_A )
A : List[Any] = AutoformerEncoder.from_pretrained(_A ).to(_A )
A : Dict = model.create_network_inputs(**_A )
A : List[Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
A : Tuple = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
A : List[str] = encoder(inputs_embeds=_A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
A : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
A : str = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
A : List[str] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
A : str = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
A : Dict = model.get_decoder()
decoder.save_pretrained(_A )
A : Dict = AutoformerDecoder.from_pretrained(_A ).to(_A )
A : Tuple = decoder(
trend=_A , inputs_embeds=_A , encoder_hidden_states=_A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCamelCase_ ( a__ ,a__ ,unittest.TestCase ):
'''simple docstring'''
a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a__ = (AutoformerForPrediction,) if is_torch_available() else ()
a__ = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]:
A : Optional[Any] = AutoformerModelTester(self )
A : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
A : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A : Dict = model_class(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
A : Optional[Any] = model_class.from_pretrained(_A , output_loading_info=_A )
self.assertEqual(info["missing_keys"] , [] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_A )
@unittest.skip(reason="Model has no tokens embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
pass
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
A : str = inspect.signature(getattr(_A , "forward" ) )
# The main input is the name of the argument after `self`
A : str = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _A )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(_A )
A : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : Dict = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(_A )] , _A )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A : Any = True
A : str = getattr(self.model_tester , "seq_length" , _A )
A : List[str] = getattr(self.model_tester , "decoder_seq_length" , _A )
A : int = getattr(self.model_tester , "encoder_seq_length" , _A )
A : List[Any] = getattr(self.model_tester , "d_model" , _A )
A : str = getattr(self.model_tester , "num_attention_heads" , _A )
A : Optional[Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
A : Any = True
A : Any = False
A : int = True
A : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
A : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
A : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A : int = True
A : List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
A : Optional[Any] = model(**self._prepare_for_class(_A , _A ) )
A : List[Any] = outputs.encoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
A : Union[str, Any] = len(_A )
A : Union[str, Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_A , _A )
# decoder attentions
A : Dict = outputs.decoder_attentions
self.assertIsInstance(_A , (list, tuple) )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
A : Any = outputs.cross_attentions
self.assertIsInstance(_A , (list, tuple) )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
A : str = True
A : List[Any] = True
A : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
A : Dict = model(**self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + 2 , len(_A ) )
A : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
super().test_retain_grad_hidden_states_attentions()
def prepare_batch ( filename="train-batch.pt" ):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=filename , repo_type="dataset" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=1e-4 ) )
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=1e-4 ) )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1 ) )
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int:
A : Optional[int] = parent
A : List[str] = batch_size
A : Tuple = image_size
A : List[str] = num_channels
A : List[str] = embeddings_size
A : List[str] = hidden_sizes
A : str = depths
A : Optional[Any] = is_training
A : int = use_labels
A : Optional[int] = hidden_act
A : List[Any] = num_labels
A : List[str] = scope
A : str = len(__lowerCamelCase )
A : Optional[int] = out_features
A : str = out_indices
A : Optional[int] = num_groups
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[int] = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
A : Any = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple:
A : Union[str, Any] = self.num_labels
A : List[str] = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : str = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> List[Any]:
A : Dict = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[Any] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A : Optional[Any] = None
A : Optional[int] = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
A : List[str] = self.prepare_config_and_inputs()
A , A , A : Tuple = config_and_inputs
A : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : Any = BitModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason="Bit does not output attentions" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
A : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
A : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img ( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : Union[str, Any] = BitModelTester(self ) | 17 | 0 |
'''simple docstring'''
import re
def split_input ( str_ ):
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]


def to_simple_case ( str_ ):
    string_split = split_input(str_ )
    return "".join(
        ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )


def to_complex_case ( text , upper , separator ):
    try:
        string_split = split_input(text )
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case ( text ):
    return to_simple_case(text )


def to_camel_case ( text ):
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case ( text , upper ):
    return to_complex_case(text , upper , "_" )


def to_kebab_case ( text , upper ):
    return to_complex_case(text , upper , "-" )
if __name__ == "__main__":
__import__("""doctest""").testmod() | 717 |
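# A short usage sketch of the case-conversion helpers above (function names as
# reconstructed here; expected outputs shown in the trailing comments).
print(to_pascal_case("hello world" ))         # -> HelloWorld
print(to_camel_case("hello world" ))          # -> helloWorld
print(to_snake_case("hello world" , True ))   # -> HELLO_WORLD
print(to_kebab_case("hello world" , False ))  # -> hello-world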
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 7_68) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 10_24) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
import math
def decimal_to_octal ( num ):
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(octal )}"""
def main ( ):
    print("\n2 in octal is:" )
    print(decimal_to_octal(2 ) )  # = 2
    print("\n8 in octal is:" )
    print(decimal_to_octal(8 ) )  # = 10
    print("\n65 in octal is:" )
    print(decimal_to_octal(65 ) )  # = 101
    print("\n216 in octal is:" )
    print(decimal_to_octal(216 ) )  # = 330
    print("\n512 in octal is:" )
    print(decimal_to_octal(512 ) )  # = 1000
    print("\n" )
if __name__ == "__main__":
main() | 718 |
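# Quick self-checks for the conversion above; the expected values match the demo in main().
assert decimal_to_octal(8 ) == "0o10"
assert decimal_to_octal(65 ) == "0o101"
assert decimal_to_octal(216 ) == "0o330"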
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str:
A : Optional[Any] = parent
A : Optional[int] = batch_size
A : List[str] = image_size
A : List[str] = num_channels
A : Tuple = embeddings_size
A : Optional[int] = hidden_sizes
A : Dict = depths
A : Optional[int] = is_training
A : List[str] = use_labels
A : List[Any] = hidden_act
A : Optional[int] = num_labels
A : int = scope
A : List[Any] = len(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : Any = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
A : List[str] = TFRegNetModel(config=__lowerCamelCase )
A : str = model(__lowerCamelCase , training=__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]:
A : List[Any] = self.num_labels
A : int = TFRegNetForImageClassification(__lowerCamelCase )
A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
A : Any = self.prepare_config_and_inputs()
A , A , A : str = config_and_inputs
A : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a__ = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Optional[Any] = TFRegNetModelTester(self )
A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
A : int = model_class(__lowerCamelCase )
A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase )
A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : List[str] = layer_type
A : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ):
A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase )
A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple()
def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ):
if isinstance(__lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ):
recursive_check(__lowerCamelCase , __lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase )
for model_class in self.all_model_classes:
A : Tuple = model_class(__lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img ( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Any=7 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Tuple=18 , __lowerCamelCase : str=30 , __lowerCamelCase : List[Any]=4_00 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[int]=True , ) -> Any:
A : Optional[int] = size if size is not None else {"height": 18, "width": 18}
A : List[str] = parent
A : str = batch_size
A : List[str] = num_channels
A : List[Any] = image_size
A : Optional[Any] = min_resolution
A : List[Any] = max_resolution
A : Dict = do_resize
A : Any = size
A : str = do_normalize
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase_ ( _a ,unittest.TestCase ):
'''simple docstring'''
a__ = ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : int = ImageGPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , "clusters" ) )
self.assertTrue(hasattr(snake_case_ , "do_resize" ) )
self.assertTrue(hasattr(snake_case_ , "size" ) )
self.assertTrue(hasattr(snake_case_ , "do_normalize" ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
A : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
A : List[str] = self.image_processing_class(**self.image_processor_dict )
A : Any = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , obj[key] ) )
else:
self.assertEqual(obj[key] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
A : str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A : Union[str, Any] = os.path.join(snake_case_ , "image_processor.json" )
image_processor_first.to_json_file(snake_case_ )
A : List[str] = self.image_processing_class.from_json_file(snake_case_ ).to_dict()
A : List[str] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
A : str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(snake_case_ )
A : Union[str, Any] = self.image_processing_class.from_pretrained(snake_case_ ).to_dict()
A : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , snake_case_ )
@unittest.skip("ImageGPT requires clusters at initialization" )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
pass
def prepare_images ( ):
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
    image_a = Image.open(dataset[4]["file"] )
    image_b = Image.open(dataset[5]["file"] )
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
        expected_ids = [3_06, 1_91, 1_91]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
        expected_ids = [3_03, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = (PNDMScheduler,)
a__ = (("num_inference_steps", 50),)
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **kwargs : str ) -> Optional[Any]:
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple:
A : Dict = dict(self.forward_default_kwargs )
A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : Union[str, Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Any = self.get_scheduler_config(**__lowerCamelCase )
A : int = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : Dict = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str:
A : List[str] = dict(self.forward_default_kwargs )
A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : List[str] = self.dummy_sample
A : Any = 0.1 * sample
A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Tuple = self.get_scheduler_config()
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A : Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : str = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[:]
A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]:
A : Optional[Any] = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(**__lowerCamelCase )
A : str = scheduler_class(**__lowerCamelCase )
A : List[str] = 10
A : Union[str, Any] = self.dummy_model()
A : int = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A : Tuple = model(__lowerCamelCase , __lowerCamelCase )
A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
A : Union[str, Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
A : List[Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__lowerCamelCase )
elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ):
A : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
A : str = 27
for scheduler_class in self.scheduler_classes:
A : Tuple = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
with self.assertRaises(__lowerCamelCase ):
A : Union[str, Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : Optional[Any] = self.full_loop()
A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
A : Any = self.full_loop(prediction_type="v_prediction" )
A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
A : Dict = torch.sum(torch.abs(__lowerCamelCase ) )
A : Any = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3 | 17 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__SCREAMING_SNAKE_CASE = 500000
__SCREAMING_SNAKE_CASE = os.path.split(__file__)
__SCREAMING_SNAKE_CASE = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def UpperCAmelCase ( _lowerCamelCase , **_lowerCamelCase ):
A : int = dataset.map(**__UpperCamelCase )
@get_duration
def UpperCAmelCase ( _lowerCamelCase , **_lowerCamelCase ):
A : Optional[Any] = dataset.filter(**__UpperCamelCase )
def UpperCAmelCase ( ):
A : List[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
A : str = datasets.Features({"text": datasets.Value("string" ), "numbers": datasets.Value("float32" )} )
A : Tuple = generate_example_dataset(
os.path.join(__UpperCamelCase , "dataset.arrow" ) , __UpperCamelCase , num_examples=__UpperCamelCase )
A : Dict = transformers.AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=__UpperCamelCase )
def tokenize(_lowerCamelCase ):
return tokenizer(examples["text"] )
A : Optional[int] = map(__UpperCamelCase )
A : int = map(__UpperCamelCase , batched=__UpperCamelCase )
A : int = map(__UpperCamelCase , function=lambda _lowerCamelCase : None , batched=__UpperCamelCase )
with dataset.formatted_as(type="numpy" ):
A : List[str] = map(__UpperCamelCase , function=lambda _lowerCamelCase : None , batched=__UpperCamelCase )
with dataset.formatted_as(type="pandas" ):
A : str = map(__UpperCamelCase , function=lambda _lowerCamelCase : None , batched=__UpperCamelCase )
with dataset.formatted_as(type="torch" , columns="numbers" ):
A : str = map(__UpperCamelCase , function=lambda _lowerCamelCase : None , batched=__UpperCamelCase )
with dataset.formatted_as(type="tensorflow" , columns="numbers" ):
A : int = map(__UpperCamelCase , function=lambda _lowerCamelCase : None , batched=__UpperCamelCase )
A : Tuple = map(__UpperCamelCase , function=__UpperCamelCase , batched=__UpperCamelCase )
A : Union[str, Any] = filter(__UpperCamelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(__UpperCamelCase , "wb" ) as f:
f.write(json.dumps(__UpperCamelCase ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter() | 720 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
__SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s
__SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
A : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
A : Tuple = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
A : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowerCamelCase_ ( _lowercase ):
'''simple docstring'''
def __get__( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple=None ) -> Dict:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
A : Tuple = "__cached_" + self.fget.__name__
A : Optional[Any] = getattr(A_ , A_ , A_ )
if cached is None:
A : Union[str, Any] = self.fget(A_ )
setattr(A_ , A_ , A_ )
return cached
def UpperCAmelCase ( _lowerCamelCase ):
A : Dict = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def UpperCAmelCase ( _lowerCamelCase ):
if is_torch_fx_proxy(snake_case__ ):
return True
if is_torch_available():
import torch
if isinstance(snake_case__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(snake_case__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(snake_case__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(snake_case__ , np.ndarray )
def UpperCAmelCase ( _lowerCamelCase ):
return isinstance(snake_case__ , np.ndarray )
def UpperCAmelCase ( _lowerCamelCase ):
return _is_numpy(snake_case__ )
def UpperCAmelCase ( _lowerCamelCase ):
import torch
return isinstance(snake_case__ , torch.Tensor )
def UpperCAmelCase ( _lowerCamelCase ):
return False if not is_torch_available() else _is_torch(snake_case__ )
def UpperCAmelCase ( _lowerCamelCase ):
import torch
return isinstance(snake_case__ , torch.device )
def UpperCAmelCase ( _lowerCamelCase ):
return False if not is_torch_available() else _is_torch_device(snake_case__ )
def UpperCAmelCase ( _lowerCamelCase ):
import torch
if isinstance(snake_case__ , snake_case__ ):
if hasattr(snake_case__ , snake_case__ ):
A : Dict = getattr(snake_case__ , snake_case__ )
else:
return False
return isinstance(snake_case__ , torch.dtype )
def UpperCAmelCase ( _lowerCamelCase ):
return False if not is_torch_available() else _is_torch_dtype(snake_case__ )
def UpperCAmelCase ( _lowerCamelCase ):
import tensorflow as tf
return isinstance(snake_case__ , tf.Tensor )
def UpperCAmelCase ( _lowerCamelCase ):
return False if not is_tf_available() else _is_tensorflow(snake_case__ )
def UpperCAmelCase ( _lowerCamelCase ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(snake_case__ , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(snake_case__ )
return type(snake_case__ ) == tf.Tensor
def UpperCAmelCase ( _lowerCamelCase ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(snake_case__ )
def UpperCAmelCase ( _lowerCamelCase ):
import jax.numpy as jnp # noqa: F811
return isinstance(snake_case__ , jnp.ndarray )
def UpperCAmelCase ( _lowerCamelCase ):
return False if not is_flax_available() else _is_jax(snake_case__ )
def UpperCAmelCase ( _lowerCamelCase ):
if isinstance(snake_case__ , (dict, UserDict) ):
return {k: to_py_obj(snake_case__ ) for k, v in obj.items()}
elif isinstance(snake_case__ , (list, tuple) ):
return [to_py_obj(snake_case__ ) for o in obj]
elif is_tf_tensor(snake_case__ ):
return obj.numpy().tolist()
elif is_torch_tensor(snake_case__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(snake_case__ ):
return np.asarray(snake_case__ ).tolist()
elif isinstance(snake_case__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def UpperCAmelCase ( _lowerCamelCase ):
if isinstance(snake_case__ , (dict, UserDict) ):
return {k: to_numpy(snake_case__ ) for k, v in obj.items()}
elif isinstance(snake_case__ , (list, tuple) ):
return np.array(snake_case__ )
elif is_tf_tensor(snake_case__ ):
return obj.numpy()
elif is_torch_tensor(snake_case__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(snake_case__ ):
return np.asarray(snake_case__ )
else:
return obj
class lowerCamelCase_ ( _lowercase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
A : Dict = fields(self )
# Safety and consistency checks
if not len(A_ ):
raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
A : int = getattr(self , class_fields[0].name )
A : List[Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(A_ ):
if isinstance(A_ , A_ ):
A : List[str] = first_field.items()
A : Dict = True
else:
try:
A : str = iter(A_ )
A : int = True
except TypeError:
A : Any = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(A_ ):
if (
not isinstance(A_ , (list, tuple) )
or not len(A_ ) == 2
or not isinstance(element[0] , A_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
A : Optional[Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
A : Dict = element[1]
elif first_field is not None:
A : Tuple = first_field
else:
for field in class_fields:
A : Dict = getattr(self , field.name )
if v is not None:
A : List[Any] = v
def __delitem__( self : Optional[int] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Dict ) -> Tuple:
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def SCREAMING_SNAKE_CASE__ ( self : int , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[Any] ) -> Dict:
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ) -> Dict:
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ) -> Dict:
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : int , __lowerCamelCase : Any ) -> str:
if isinstance(A_ , A_ ):
A : Optional[int] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict ) -> Dict:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(A_ , A_ )
super().__setattr__(A_ , A_ )
def __setitem__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> int:
# Will raise a KeyException if needed
super().__setitem__(A_ , A_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(A_ , A_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class lowerCamelCase_ ( _lowercase ,_lowercase ):
'''simple docstring'''
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , __lowerCamelCase : int ) -> Tuple:
raise ValueError(
F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class lowerCamelCase_ ( _lowercase ):
'''simple docstring'''
a__ = '''longest'''
a__ = '''max_length'''
a__ = '''do_not_pad'''
class lowerCamelCase_ ( _lowercase ):
'''simple docstring'''
a__ = '''pt'''
a__ = '''tf'''
a__ = '''np'''
a__ = '''jax'''
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : List[ContextManager] ) -> Any:
A : Union[str, Any] = context_managers
A : Optional[Any] = ExitStack()
def __enter__( self : Tuple ) -> List[Any]:
for context_manager in self.context_managers:
self.stack.enter_context(A_ )
def __exit__( self : int , *__lowerCamelCase : str , **__lowerCamelCase : str ) -> Any:
self.stack.__exit__(*A_ , **A_ )
def UpperCAmelCase ( _lowerCamelCase ):
A : Dict = infer_framework(snake_case__ )
if framework == "tf":
A : str = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
A : Tuple = inspect.signature(model_class.forward ) # PyTorch models
else:
A : List[Any] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def UpperCAmelCase ( _lowerCamelCase ):
A : str = model_class.__name__
A : List[Any] = infer_framework(snake_case__ )
if framework == "tf":
A : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
A : Optional[int] = inspect.signature(model_class.forward ) # PyTorch models
else:
A : int = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = "" , _lowerCamelCase = "." ):
def _flatten_dict(_lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase="." ):
for k, v in d.items():
A : Tuple = str(snake_case__ ) + delimiter + str(snake_case__ ) if parent_key else k
if v and isinstance(snake_case__ , snake_case__ ):
yield from flatten_dict(snake_case__ , snake_case__ , delimiter=snake_case__ ).items()
else:
yield key, v
return dict(_flatten_dict(snake_case__ , snake_case__ , snake_case__ ) )
@contextmanager
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=None ):
if is_numpy_array(snake_case__ ):
return np.transpose(snake_case__ , axes=snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.T if axes is None else array.permute(*snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.transpose(snake_case__ , perm=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.transpose(snake_case__ , axes=snake_case__ )
else:
raise ValueError(f"""Type not supported for transpose: {type(snake_case__ )}.""" )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
if is_numpy_array(snake_case__ ):
return np.reshape(snake_case__ , snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.reshape(*snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.reshape(snake_case__ , snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.reshape(snake_case__ , snake_case__ )
else:
raise ValueError(f"""Type not supported for reshape: {type(snake_case__ )}.""" )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=None ):
if is_numpy_array(snake_case__ ):
return np.squeeze(snake_case__ , axis=snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.squeeze() if axis is None else array.squeeze(dim=snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.squeeze(snake_case__ , axis=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.squeeze(snake_case__ , axis=snake_case__ )
else:
raise ValueError(f"""Type not supported for squeeze: {type(snake_case__ )}.""" )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
if is_numpy_array(snake_case__ ):
return np.expand_dims(snake_case__ , snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.unsqueeze(dim=snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.expand_dims(snake_case__ , axis=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.expand_dims(snake_case__ , axis=snake_case__ )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(snake_case__ )}.""" )
def UpperCAmelCase ( _lowerCamelCase ):
if is_numpy_array(snake_case__ ):
return np.size(snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.numel()
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.size(snake_case__ )
elif is_jax_tensor(snake_case__ ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(snake_case__ )}.""" )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
for key, value in auto_map.items():
if isinstance(snake_case__ , (tuple, list) ):
A : Optional[Any] = [f"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
A : Optional[Any] = f"""{repo_id}--{value}"""
return auto_map
def UpperCAmelCase ( _lowerCamelCase ):
for base_class in inspect.getmro(snake_case__ ):
A : Dict = base_class.__module__
A : int = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" ) | 721 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained(
_lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) )
A : Union[str, Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A : Any = tensor_value
A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
# convert tokenizer
A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 17 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def UpperCAmelCase ( _lowerCamelCase ):
def decorator(_lowerCamelCase ):
A : List[Any] = getattr(__SCREAMING_SNAKE_CASE , "handle_key" , [] )
handle += [key]
setattr(__SCREAMING_SNAKE_CASE , "handle_key" , __SCREAMING_SNAKE_CASE )
return func
return decorator
def UpperCAmelCase ( *_lowerCamelCase ):
def decorator(_lowerCamelCase ):
A : List[Any] = getattr(__SCREAMING_SNAKE_CASE , "handle_key" , [] )
handle += keys
setattr(__SCREAMING_SNAKE_CASE , "handle_key" , __SCREAMING_SNAKE_CASE )
return func
return decorator
class lowerCamelCase_ ( lowercase__ ):
'''simple docstring'''
def __new__( cls : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A : Dict = super().__new__(cls , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not hasattr(__lowerCamelCase , "key_handler" ):
setattr(__lowerCamelCase , "key_handler" , {} )
setattr(__lowerCamelCase , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
A : Dict = getattr(__lowerCamelCase , "handle_key" , [] )
for key in handled_keys:
A : Tuple = value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict ) -> Any:
A : List[str] = get_character()
if char != KEYMAP["undefined"]:
A : Union[str, Any] = ord(__lowerCamelCase )
A : Optional[Any] = cls.key_handler.get(__lowerCamelCase )
if handler:
A : Dict = char
return handler(cls )
else:
return None
def UpperCAmelCase ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 17 | 0 |
from __future__ import annotations
__SCREAMING_SNAKE_CASE = [True] * 1000001
__SCREAMING_SNAKE_CASE = 2
while i * i <= 1000000:
if seive[i]:
for j in range(i * i, 1000001, i):
__SCREAMING_SNAKE_CASE = False
i += 1
def UpperCAmelCase ( _lowerCamelCase ):
return seive[n]
def UpperCAmelCase ( _lowerCamelCase ):
return any(digit in "02468" for digit in str(_lowerCamelCase ) )
def UpperCAmelCase ( _lowerCamelCase = 100_0000 ):
A : Optional[int] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(_lowerCamelCase ) and not contains_an_even_digit(_lowerCamelCase ):
A : str = str(_lowerCamelCase )
A : Optional[int] = [int(str_num[j:] + str_num[:j] ) for j in range(len(_lowerCamelCase ) )]
if all(is_prime(_lowerCamelCase ) for i in list_nums ):
result.append(_lowerCamelCase )
return result
def UpperCAmelCase ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""") | 701 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for a, b in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Union[str, Any] = None
ops.enable_eager_execution_internal()
A : Tuple = tf.config.list_physical_devices("CPU" )
if len(__lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
A : Dict = tf.config.list_logical_devices(device_type="CPU" )
A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
A : Optional[int] = GradientAccumulator()
A : Tuple = tf.Variable([4.0, 3.0] )
A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 )
A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase )
def accumulate_on_replica(__lowerCamelCase : Tuple ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
with strategy.scope():
A : int = strategy.experimental_local_results(__lowerCamelCase )
local_variables[0].assign(__lowerCamelCase )
local_variables[1].assign(__lowerCamelCase )
strategy.run(__lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowerCamelCase )
def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] ) | 17 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger("""transformers.models.speecht5""")
__SCREAMING_SNAKE_CASE = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
__SCREAMING_SNAKE_CASE = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
__SCREAMING_SNAKE_CASE = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
__SCREAMING_SNAKE_CASE = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
__SCREAMING_SNAKE_CASE = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
__SCREAMING_SNAKE_CASE = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
__SCREAMING_SNAKE_CASE = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
__SCREAMING_SNAKE_CASE = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
__SCREAMING_SNAKE_CASE = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__SCREAMING_SNAKE_CASE = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__SCREAMING_SNAKE_CASE = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
__SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
__SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
__SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for attribute in key.split("." ):
A : Union[str, Any] = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
A : Union[str, Any] = getattr(snake_case_ , snake_case_ ).shape
else:
A : Tuple = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
A : Dict = value
elif weight_type == "weight_g":
A : List[Any] = value
elif weight_type == "weight_v":
A : int = value
elif weight_type == "bias":
A : List[Any] = value
elif weight_type == "running_mean":
A : Optional[Any] = value
elif weight_type == "running_var":
A : Dict = value
elif weight_type == "num_batches_tracked":
A : Optional[int] = value
else:
A : Tuple = value
logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A : Optional[Any] = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = []
if task == "s2t":
A : Union[str, Any] = hf_model.speechta.encoder.prenet.feature_encoder
A : int = MAPPING_S2T
A : List[str] = IGNORE_KEYS_S2T
elif task == "t2s":
A : str = None
A : Optional[Any] = MAPPING_T2S
A : Tuple = IGNORE_KEYS_T2S
elif task == "s2s":
A : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
A : Any = MAPPING_S2S
A : Tuple = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(snake_case_ , snake_case_ ):
logger.info(f"""{name} was ignored""" )
continue
A : Any = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
A : Dict = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
A : Tuple = key.split(".*." )
if prefix in name and suffix in name:
A : Union[str, Any] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
A : Optional[Any] = True
if "*" in mapped_key:
A : Dict = name.split(snake_case_ )[0].split("." )[-2]
A : List[Any] = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
A : Any = "weight_g"
elif "weight_v" in name:
A : Optional[int] = "weight_v"
elif "bias" in name:
A : List[str] = "bias"
elif "weight" in name:
A : Tuple = "weight"
elif "running_mean" in name:
A : Any = "running_mean"
elif "running_var" in name:
A : Dict = "running_var"
elif "num_batches_tracked" in name:
A : Optional[int] = "num_batches_tracked"
else:
A : List[str] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Optional[int] = full_name.split("conv_layers." )[-1]
A : Optional[Any] = name.split("." )
A : Optional[Any] = int(items[0] )
A : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
A : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
A : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
A : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
A : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ):
if config_path is not None:
A : List[Any] = SpeechTaConfig.from_pretrained(snake_case_ )
else:
A : List[Any] = SpeechTaConfig()
if task == "s2t":
A : List[Any] = config.max_text_positions
A : int = SpeechTaForSpeechToText(snake_case_ )
elif task == "t2s":
A : List[str] = 1876
A : List[Any] = 600
A : List[Any] = config.max_speech_positions
A : Any = SpeechTaForTextToSpeech(snake_case_ )
elif task == "s2s":
A : Tuple = 1876
A : int = config.max_speech_positions
A : Optional[int] = SpeechTaForSpeechToSpeech(snake_case_ )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
A : Any = SpeechTaTokenizer(snake_case_ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
A : List[str] = AddedToken("<mask>" , lstrip=snake_case_ , rstrip=snake_case_ )
A : List[Any] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
A : Any = SpeechTaFeatureExtractor()
A : Union[str, Any] = SpeechTaProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(snake_case_ )
A : str = torch.load(snake_case_ )
recursively_load_weights(fairseq_checkpoint["model"] , snake_case_ , snake_case_ )
model.save_pretrained(snake_case_ )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(snake_case_ )
model.push_to_hub(snake_case_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
) | 702 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 17 | 0 |
from __future__ import annotations
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : list[list[int]] ) -> List[Any]:
A : Optional[int] = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float." )
if len(__lowerCamelCase ) != 0:
A : Optional[Any] = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__lowerCamelCase ) != cols:
raise error
for value in row:
if not isinstance(__lowerCamelCase , (int, float) ):
raise error
A : Tuple = rows
else:
A : Dict = []
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
return len(self.rows )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
return len(self.rows[0] )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> bool:
return self.order[0] == self.order[1]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Matrix:
A : int = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> bool:
return bool(self.determinant() )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
A : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__lowerCamelCase ).determinant()
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(__lowerCamelCase , __lowerCamelCase )
return -1 * self.get_minor(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Matrix:
return Matrix(
[
[self.get_minor(__lowerCamelCase , __lowerCamelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Matrix:
A : Tuple = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Matrix:
A : List[str] = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ) -> str:
return str(self.rows )
def __str__( self : str ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__lowerCamelCase ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : list[int] , __lowerCamelCase : int | None = None ) -> None:
A : Dict = TypeError("Row must be a list containing all ints and/or floats" )
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise type_error
for value in row:
if not isinstance(__lowerCamelCase , (int, float) ):
raise type_error
if len(__lowerCamelCase ) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix" )
if position is None:
self.rows.append(__lowerCamelCase )
else:
A : List[str] = self.rows[0:position] + [row] + self.rows[position:]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : list[int] , __lowerCamelCase : int | None = None ) -> None:
A : List[Any] = TypeError(
"Column must be a list containing all ints and/or floats" )
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise type_error
for value in column:
if not isinstance(__lowerCamelCase , (int, float) ):
raise type_error
if len(__lowerCamelCase ) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix" )
if position is None:
A : List[Any] = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
A : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Tuple , __lowerCamelCase : object ) -> bool:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : List[Any] , __lowerCamelCase : object ) -> bool:
return not self == other
def __neg__( self : List[str] ) -> Matrix:
return self * -1
def __add__( self : Union[str, Any] , __lowerCamelCase : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Optional[Any] , __lowerCamelCase : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Tuple , __lowerCamelCase : Matrix | int | float ) -> Matrix:
if isinstance(__lowerCamelCase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
[Matrix.dot_product(__lowerCamelCase , __lowerCamelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
def __pow__( self : List[Any] , __lowerCamelCase : int ) -> Matrix:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
A : Union[str, Any] = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , __lowerCamelCase : list[int] , __lowerCamelCase : list[int] ) -> int:
return sum(row[i] * column[i] for i in range(len(__lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 703 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__SCREAMING_SNAKE_CASE = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
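# Query PyPI for the list of published `diffusers` releases and return them sorted from
# oldest to newest by version number.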
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda release: version.Version(release))
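# Make the dynamic-modules cache (HF_MODULES_CACHE) importable: add it to sys.path and
# initialize it as a package with an empty __init__.py.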
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
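# Create a (possibly nested) package under HF_MODULES_CACHE so that downloaded module files
# can later be imported like regular Python modules.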
def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def UpperCAmelCase ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Union[str, Any] = f.read()
# Imports of the form `import .xxx`
    A : Union[str, Any] = re.findall(r"^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(_lowerCamelCase ) )
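# A quick standalone demonstration of the two patterns matched above, run over a
# hypothetical module body (the relative module names are made up).
def _example_relative_import_scan():
    sample = "from .utils import helper\nfrom .pipeline_a import MyPipeline\nimport torch\n"
    found = re.findall(r"^\s*import\s+\.(\S+)\s*$", sample, flags=re.MULTILINE)
    found += re.findall(r"^\s*from\s+\.(\S+)\s+import", sample, flags=re.MULTILINE)
    return sorted(set(found))  # -> ['pipeline_a', 'utils']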
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[int] = False
A : Tuple = [module_file]
A : Optional[int] = []
# Let's recurse through all relative imports
while not no_change:
A : Optional[Any] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowerCamelCase ) )
A : Optional[Any] = Path(_lowerCamelCase ).parent
A : List[str] = [str(module_path / m ) for m in new_imports]
A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports]
A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files]
A : Tuple = len(_lowerCamelCase ) == 0
all_relative_imports.extend(_lowerCamelCase )
return all_relative_imports
def UpperCAmelCase ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Dict = f.read()
# Imports of the form `import xxx`
    A : List[str] = re.findall(r"^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
A : Any = list(set(_lowerCamelCase ) )
A : Tuple = []
for imp in imports:
try:
importlib.import_module(_lowerCamelCase )
except ImportError:
missing_packages.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" )
return get_relative_imports(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : int = module_path.replace(os.path.sep , "." )
A : Optional[Any] = importlib.import_module(_lowerCamelCase )
if class_name is None:
return find_pipeline_class(_lowerCamelCase )
return getattr(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase ):
from ..pipelines import DiffusionPipeline
A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) )
A : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowerCamelCase )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
A : Any = cls
return pipeline_class
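# A minimal, self-contained sketch of the discovery pattern used above: inspect a
# module's classes and keep one that subclasses a given base without being the
# base itself. The helper and its arguments are illustrative, not a diffusers API.
def _example_find_single_subclass(module, base_cls):
    candidates = [
        member
        for _, member in inspect.getmembers(module, inspect.isclass)
        if issubclass(member, base_cls) and member is not base_cls
    ]
    return candidates[0] if candidates else None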
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ):
A : List[Any] = str(_lowerCamelCase )
A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
A : Union[str, Any] = module_file_or_url
A : Any = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
A : Optional[Any] = get_diffusers_versions()
# cut ".dev0"
A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
A : List[Any] = latest_version if latest_version[1:] in available_versions else "main"
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
A : Optional[Any] = f"""v{revision}"""
elif revision == "main":
A : Dict = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
A : Optional[int] = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = "git"
A : Any = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
A : Any = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
A : List[str] = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
A : Optional[int] = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
A : int = f"""{module_needed}.py"""
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = use_auth_token
elif use_auth_token is True:
A : Dict = HfFolder.get_token()
else:
A : Tuple = None
A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A : str = submodule_path / commit_hash
A : List[str] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
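# A tiny standalone example of the path-to-module translation behind the
# get_class_in_module call below: the cached file path becomes a dotted name
# before importlib loads it (the example path is hypothetical, POSIX separators).
def _example_dotted_module_name(path="diffusers_modules/git/my_pipeline.py"):
    return path.replace(".py", "").replace(os.path.sep, ".")
    # -> "diffusers_modules.git.my_pipeline"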
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
A : int = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) ) | 17 | 0 |
def UpperCAmelCase ( _lowerCamelCase = 400_0000 ):
A : Dict = [0, 1]
A : str = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
A : Optional[int] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
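# A standalone sketch of the same computation with a rolling pair of values
# instead of a stored list; for the default limit of 4,000,000 the even-valued
# Fibonacci terms sum to 4613732.
def _example_even_fib_sum(limit=400_0000):
    previous, current = 1, 2
    total = 0
    while current <= limit:
        if current % 2 == 0:
            total += current
        previous, current = current, previous + current
    return total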
if __name__ == "__main__":
print(F"""{solution() = }""") | 704 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict:
A : str = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
if not do_eval:
return
A : List[Any] = [log for log in logs if "eval_loss" in log.keys()]
A : Any = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A : List[str] = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
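    # A minimal illustrative helper (name is made up, not used by the tests): the
    # bookkeeping above amounts to filtering trainer_state.json's log history for
    # the entries that carry an "eval_loss" key.
    @staticmethod
    def _example_eval_entries(log_history):
        return [entry for entry in log_history if "eval_loss" in entry]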
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A : Dict = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
A : List[str] = experiments[experiment_id]
A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
A : Union[str, Any] = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] )
A : Dict = len(re.findall(__lowerCamelCase , cl.err ) )
self.assertEqual(__lowerCamelCase , data["n_matches"] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : int = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , )
# Check metrics
A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : Dict = [log for log in logs if "eval_loss" in log.keys()]
A : Dict = eval_metrics[0]
A : int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
# test if do_predict saves generations and metrics
A : Optional[Any] = os.listdir(__lowerCamelCase )
A : Any = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]:
A : Optional[int] = "--skip_memory_metrics 0"
A : str = self.run_trainer(
max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
A : int = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig
A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A : int = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A : Tuple = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]:
A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
A : Optional[int] = self.get_auto_remove_tmp_dir()
A : int = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
A : Optional[Any] = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCamelCase )}
""".split()
A : Optional[Any] = "\n --do_predict\n ".split()
A : Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A : Dict = get_gpu_count()
A : Any = get_torch_dist_unique_port()
A : Optional[Any] = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
A : Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
else:
A : List[Any] = ["run_translation.py"] + args
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
main()
return output_dir | 17 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE = """▁"""
__SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( __a ,unittest.TestCase ):
'''simple docstring'''
a__ = BigBirdTokenizer
a__ = BigBirdTokenizerFast
a__ = True
a__ = True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
super().setUp()
A : Union[str, Any] = self.tokenizer_class(a_ , keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A : Any = """<s>"""
A : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(a_ ) , 10_04 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
if not self.test_rust_tokenizer:
return
A : Optional[int] = self.get_tokenizer()
A : Any = self.get_rust_tokenizer()
A : Optional[Any] = """I was born in 92000, and this is falsé."""
A : Tuple = tokenizer.tokenize(a_ )
A : Tuple = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
A : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
A : Optional[Any] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
A : Tuple = self.get_rust_tokenizer()
A : Union[str, Any] = tokenizer.encode(a_ )
A : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
A : int = BigBirdTokenizer(a_ , keep_accents=a_ )
A : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [2_85, 46, 10, 1_70, 3_82] , )
A : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
A : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
A : Optional[Any] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Union[str, Any] = """Hello World!"""
A : List[str] = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
A : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
A : Union[str, Any] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
A : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
A : List[Any] = """ """.join(a_ )
A : Dict = self.big_tokenizer.encode_plus(a_ , return_tensors="pt" , return_token_type_ids=a_ )
A : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=a_ )
A : Any = BigBirdConfig(attention_type="original_full" )
A : str = BigBirdModel(a_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a_ )
model(**a_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
A : Optional[Any] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
A : Optional[int] = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
A : Any = {"""input_ids""": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , ) | 705 |
from collections.abc import Sequence
def UpperCAmelCase ( _lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
A : Dict = nums[0]
for i in range(1 , len(_lowerCamelCase ) ):
A : Tuple = nums[i]
A : List[Any] = max(_lowerCamelCase , ans + num , _lowerCamelCase )
return ans
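# An equivalent closed form of what the loop above computes (the maximum
# non-contiguous subsequence sum): take every positive element, or the largest
# single element when none are positive. E.g. [5, -9, 6, -2, 3] -> 14.
def _example_reference_answer(nums):
    positives = [x for x in nums if x > 0]
    return sum(positives) if positives else max(nums)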
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__SCREAMING_SNAKE_CASE = int(input("""Enter number of elements : """).strip())
__SCREAMING_SNAKE_CASE = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array)) | 17 | 0 |
import torch
from torch import nn
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : str=False ) -> Optional[Any]:
super().__init__()
A : str = n_token
A : Optional[Any] = d_embed
A : str = d_proj
A : Optional[Any] = cutoffs + [n_token]
A : Dict = [0] + self.cutoffs
A : Tuple = div_val
A : int = self.cutoffs[0]
A : str = len(self.cutoffs ) - 1
A : List[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
A : str = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
A : str = nn.Parameter(torch.zeros(self.n_clusters ) )
A : str = nn.ModuleList()
A : str = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
else:
self.out_projs.append(__lowerCAmelCase )
self.out_layers.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
A , A : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A : List[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
self.out_layers.append(nn.Linear(__lowerCAmelCase , r_idx - l_idx ) )
A : Any = keep_order
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> Dict:
if proj is None:
A : Dict = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
A : int = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() )
A : int = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=False ) -> Any:
if labels is not None:
# Shift so that tokens < n predict n
A : int = hidden[..., :-1, :].contiguous()
A : Tuple = labels[..., 1:].contiguous()
A : int = hidden.view(-1 , hidden.size(-1 ) )
A : str = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
A : Tuple = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
A : Optional[int] = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
A : str = labels != -1_00
A : Union[str, Any] = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
A : List[Any] = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
A : Dict = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
A , A : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
A , A : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
A : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
A : List[str] = self.out_layers[i].weight
A : Union[str, Any] = self.out_layers[i].bias
if i == 0:
A : str = torch.cat([weight_i, self.cluster_weight] , dim=0 )
A : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
A , A , A : Union[str, Any] = weights[0], biases[0], self.out_projs[0]
A : str = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A : Any = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
A : Optional[int] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
A : List[str] = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
A : int = 0
A : int = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
A , A : List[Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
A : Optional[Any] = (labels >= l_idx) & (labels < r_idx)
A : List[str] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
A : Union[str, Any] = labels.index_select(0 , __lowerCAmelCase ) - l_idx
A : Union[str, Any] = head_logprob.index_select(0 , __lowerCAmelCase )
A : List[Any] = hidden.index_select(0 , __lowerCAmelCase )
else:
A : List[Any] = hidden
if i == 0:
if labels is not None:
A : Any = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
A : int = head_logprob[:, : self.cutoffs[0]]
else:
A , A , A : List[str] = weights[i], biases[i], self.out_projs[i]
A : Optional[Any] = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A : int = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
A : Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
A : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
A : Dict = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
A : Optional[Any] = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] ) -> List[str]:
if self.n_clusters == 0:
A : Optional[Any] = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
A , A : Optional[int] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
A , A : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A : Optional[int] = self.out_layers[0].weight[l_idx:r_idx]
A : Union[str, Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
A : List[str] = self.out_layers[i].weight
A : Optional[int] = self.out_layers[i].bias
if i == 0:
A : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0 )
A : Optional[int] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
A , A , A : Optional[Any] = weights[0], biases[0], self.out_projs[0]
A : Tuple = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A : Optional[int] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
A : Tuple = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
A : List[str] = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
A , A : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
A : int = head_logprob[:, : self.cutoffs[0]]
else:
A , A , A : Tuple = weights[i], biases[i], self.out_projs[i]
A : List[str] = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A : List[str] = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
A : Any = head_logprob[:, -i] + tail_logprob_i
A : List[str] = logprob_i
return out | 706 |
from math import sqrt
def UpperCAmelCase ( _lowerCamelCase = 100_0000 ):
A : int = 0
A : int = 0
A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCamelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
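# A standalone check of the geometry used above: unfolding the cuboid gives
# three candidate straight-line paths, and the shortest one for the 6 x 5 x 3
# cuboid from the problem statement is exactly 10.
def _example_shortest_surface_path(a, b, c):
    return min(
        sqrt((a + b) ** 2 + c**2),
        sqrt((a + c) ** 2 + b**2),
        sqrt((b + c) ** 2 + a**2),
    )
# _example_shortest_surface_path(6, 5, 3) == 10.0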
if __name__ == "__main__":
print(F"""{solution() = }""") | 17 | 0 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : str = 0
A : int = len(UpperCAmelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
A : Dict = i + 1
else:
A : Union[str, Any] = j - 1
return []
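# A short standalone walk-through of the same idea; the scan is only valid
# because the input list is already sorted in ascending order.
def _example_two_pointer_demo() -> list[int]:
    nums, target = [1, 3, 4, 6, 8, 10], 10
    i, j = 0, len(nums) - 1
    while i < j:
        current = nums[i] + nums[j]
        if current == target:
            return [i, j]
        if current < target:
            i += 1
        else:
            j -= 1
    return []
# _example_two_pointer_demo() == [2, 3] since nums[2] + nums[3] == 4 + 6 == 10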
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""") | 707 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__SCREAMING_SNAKE_CASE = """."""
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
with open(doctest_file_path) as fp:
for line in fp:
__SCREAMING_SNAKE_CASE = line.strip()
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""") | 17 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class lowerCamelCase_ ( a__ ):
'''simple docstring'''
a__ = """summarization"""
a__ = ["""loss"""]
a__ = ROUGE_KEYS
a__ = """rouge2"""
def __init__( self : Any , __lowerCamelCase : List[Any] , **__lowerCamelCase : Dict ) -> Tuple:
if hparams.sortish_sampler and hparams.gpus > 1:
A : Union[str, Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(lowerCAmelCase__ , num_labels=lowerCAmelCase__ , mode=self.mode , **lowerCAmelCase__ )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
A : Optional[Any] = Path(self.output_dir ) / "metrics.json"
A : int = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
A : List[Any] = 0
A : Union[str, Any] = defaultdict(lowerCAmelCase__ )
A : Any = self.config.model_type
A : str = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
A : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A : Tuple = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
A : Optional[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A : str = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A : Optional[Any] = get_git_info()["repo_sha"]
A : List[Any] = hparams.num_workers
A : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCAmelCase__ ):
A : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A : Optional[Any] = self.decoder_start_token_id
A : Union[str, Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
A : Optional[Any] = False
A : int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A : Any = self.hparams.eval_max_gen_length
else:
A : Optional[int] = self.model.config.max_length
A : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict[str, List[str]]:
A : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCAmelCase__ , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
A : Any = True
return readable_batch
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Any , **__lowerCamelCase : List[str] ) -> int:
return self.model(lowerCAmelCase__ , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Dict ) -> int:
A : str = self.tokenizer.batch_decode(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
return lmap(str.strip , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[Any] ) -> Tuple:
A : List[Any] = self.tokenizer.pad_token_id
A : Optional[int] = batch["input_ids"], batch["attention_mask"]
A : Tuple = batch["labels"]
if isinstance(self.model , lowerCAmelCase__ ):
A : Optional[int] = self.model._shift_right(lowerCAmelCase__ )
else:
A : Optional[int] = shift_tokens_right(lowerCAmelCase__ , lowerCAmelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A : List[str] = decoder_input_ids
self.save_readable_batch(lowerCAmelCase__ )
A : Any = self(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
A : Any = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A : List[str] = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
A : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A : Tuple = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 )
A : Any = label_smoothed_nll_loss(
lowerCAmelCase__ , lowerCAmelCase__ , self.hparams.label_smoothing , ignore_index=lowerCAmelCase__ )
return (loss,)
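    # An illustrative scalar version (helper name made up) of one common label
    # smoothing formulation like the one applied in _step above: mix the target's
    # NLL with the mean NLL over the whole vocabulary.
    @staticmethod
    def _example_label_smoothed_nll(log_probs, target_index, epsilon):
        nll = -log_probs[target_index]
        smooth = -sum(log_probs) / len(log_probs)
        return (1.0 - epsilon) * nll + epsilon * smooth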
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
return self.tokenizer.pad_token_id
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ) -> Dict:
A : Optional[int] = self._step(lowerCAmelCase__ )
A : Dict = dict(zip(self.loss_names , lowerCAmelCase__ ) )
# tokens per batch
A : List[Any] = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
A : List[Any] = batch["input_ids"].shape[0]
A : Any = batch["input_ids"].eq(self.pad ).sum()
A : str = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ) -> Dict:
return self._generative_step(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]="val" ) -> Dict:
self.step_count += 1
A : List[str] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A : int = losses["loss"]
A : Any = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
A : Optional[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A : torch.FloatTensor = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCAmelCase__ )
A : Optional[int] = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
A : List[Any] = self.step_count
self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path
A : Optional[int] = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"""{prefix}_loss""": loss,
F"""{prefix}_{self.val_metric}""": metric_tensor,
}
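    # A tiny illustrative helper (not used elsewhere): validation_epoch_end above
    # reduces per-step dicts to epoch-level values, which on plain Python floats
    # is just a mean per key.
    @staticmethod
    def _example_mean_per_key(step_outputs, keys=("loss",)):
        return {k: sum(float(o[k]) for o in step_outputs) / len(step_outputs) for k in keys}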
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ) -> Dict:
return calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : int ) -> dict:
A : Any = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A : Optional[int] = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=lowerCAmelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A : str = (time.time() - ta) / batch["input_ids"].shape[0]
A : List[str] = self.ids_to_clean_text(lowerCAmelCase__ )
A : List[str] = self.ids_to_clean_text(batch["labels"] )
A : int = self._step(lowerCAmelCase__ )
A : int = dict(zip(self.loss_names , lowerCAmelCase__ ) )
A : Dict = self.calc_generative_metrics(lowerCAmelCase__ , lowerCAmelCase__ )
A : Optional[int] = np.mean(lmap(lowerCAmelCase__ , lowerCAmelCase__ ) )
base_metrics.update(gen_time=lowerCAmelCase__ , gen_len=lowerCAmelCase__ , preds=lowerCAmelCase__ , target=lowerCAmelCase__ , **lowerCAmelCase__ )
return base_metrics
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
return self._generative_step(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Dict ) -> Optional[Any]:
return self.validation_epoch_end(lowerCAmelCase__ , prefix="test" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[Any] ) -> SeqaSeqDataset:
A : Union[str, Any] = self.n_obs[type_path]
A : List[str] = self.target_lens[type_path]
A : Any = self.dataset_class(
self.tokenizer , type_path=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , max_target_length=lowerCAmelCase__ , **self.dataset_kwargs , )
return dataset
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : str = False ) -> DataLoader:
A : Tuple = self.get_dataset(lowerCAmelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A : int = dataset.make_sortish_sampler(lowerCAmelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A : Union[str, Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase__ , batch_sampler=lowerCAmelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> DataLoader:
A : List[str] = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=lowerCAmelCase__ )
return dataloader
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> DataLoader:
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> DataLoader:
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( __lowerCamelCase : str , __lowerCamelCase : List[str] ) -> Optional[int]:
BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ )
add_generic_args(lowerCAmelCase__ , lowerCAmelCase__ )
parser.add_argument(
"--max_source_length" , default=10_24 , type=lowerCAmelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=56 , type=lowerCAmelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=1_42 , type=lowerCAmelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=1_42 , type=lowerCAmelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=lowerCAmelCase__ )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=lowerCAmelCase__ )
parser.add_argument("--max_tokens_per_batch" , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
parser.add_argument("--logger_name" , type=lowerCAmelCase__ , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=lowerCAmelCase__ , default=5_00 , required=lowerCAmelCase__ , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=lowerCAmelCase__ , default="summarization" , required=lowerCAmelCase__ , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=lowerCAmelCase__ , default=0.0 , required=lowerCAmelCase__ )
parser.add_argument("--src_lang" , type=lowerCAmelCase__ , default="" , required=lowerCAmelCase__ )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase__ , default="" , required=lowerCAmelCase__ )
parser.add_argument("--eval_beams" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument(
"--val_metric" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class lowerCamelCase_ ( a__ ):
'''simple docstring'''
a__ = """translation"""
a__ = ["""loss"""]
a__ = ["""bleu"""]
a__ = """bleu"""
def __init__( self : List[str] , __lowerCamelCase : Any , **__lowerCamelCase : int ) -> Optional[Any]:
super().__init__(lowerCAmelCase__ , **lowerCAmelCase__ )
A : Optional[Any] = hparams.src_lang
A : Union[str, Any] = hparams.tgt_lang
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> dict:
return calculate_bleu(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=None ):
Path(args.output_dir ).mkdir(exist_ok=_lowercase )
check_output_dir(_lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
A : SummarizationModule = SummarizationModule(_lowercase )
else:
A : SummarizationModule = TranslationModule(_lowercase )
A : int = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
A : Tuple = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A : Optional[int] = os.environ.get("WANDB_PROJECT" , _lowercase )
A : Dict = WandbLogger(name=model.output_dir.name , project=_lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A : str = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
A : Tuple = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
A : Optional[int] = False
A : Optional[Any] = args.val_metric == "loss"
A : pl.Trainer = generic_train(
_lowercase , _lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _lowercase ) , early_stopping_callback=_lowercase , logger=_lowercase , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
A : Optional[Any] = ""
A : Dict = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=_lowercase ) )
if checkpoints:
A : Optional[int] = checkpoints[-1]
A : int = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE = pl.Trainer.add_argparse_args(parser)
__SCREAMING_SNAKE_CASE = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__SCREAMING_SNAKE_CASE = parser.parse_args()
main(args) | 708 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
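# Helper that builds a small DeiT config together with random pixel values and labels for the unit tests below.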
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str:
A : List[Any] = parent
A : Optional[int] = batch_size
A : Any = image_size
A : Optional[Any] = patch_size
A : Optional[Any] = num_channels
A : Tuple = is_training
A : Optional[Any] = use_labels
A : Union[str, Any] = hidden_size
A : Tuple = num_hidden_layers
A : Union[str, Any] = num_attention_heads
A : Union[str, Any] = intermediate_size
A : Any = hidden_act
A : Tuple = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Tuple = initializer_range
A : List[Any] = scope
A : Optional[int] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A : List[str] = (image_size // patch_size) ** 2
A : List[str] = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[Any] = None
if self.use_labels:
A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int:
A : Optional[int] = DeiTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any:
A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : List[str] = 1
A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict:
A : str = self.type_sequence_label_size
A : List[str] = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Any = 1
A : str = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Dict = self.prepare_config_and_inputs()
        A , A , A : Tuple = config_and_inputs
A : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
a__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
A : str = DeiTModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Any = [*signature.parameters.keys()]
A : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str:
A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
if not self.model_tester.is_training:
return
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A : Union[str, Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Dict = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A : Tuple = False
A : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A : List[str] = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
A : Tuple = problem_type["title"]
A : Optional[Any] = problem_type["num_labels"]
A : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
A : int = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
A : Optional[Any] = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
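# COCO sample image shared by the integration tests below.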
def UpperCAmelCase ( ):
A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : List[str] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : str = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
A : Dict = self.default_image_processor
A : Optional[int] = prepare_img()
A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" )
A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A : List[str] = model(__lowerCamelCase ) | 17 | 0 |
import sys
from collections import defaultdict
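# Binary min-heap over vertices; node_position tracks where each vertex currently sits in the heap.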
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> List[str]:
A : int = []
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : str ) -> Dict:
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ) -> List[str]:
A : str = pos
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A : Tuple = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A : Optional[int] = 2 * start + 1
else:
A : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
A , A : Dict = heap[smallest_child], positions[smallest_child]
A , A : Union[str, Any] = (
heap[start],
positions[start],
)
A , A : List[Any] = temp, tempa
A : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __lowerCAmelCase )
self.top_to_bottom(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : str ) -> List[str]:
A : Dict = position[index]
while index != 0:
A : str = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A : Any = heap[parent]
A : Optional[int] = position[parent]
self.set_position(position[parent] , __lowerCAmelCase )
else:
A : Dict = val
A : int = temp
self.set_position(__lowerCAmelCase , __lowerCAmelCase )
break
A : int = parent
else:
A : Dict = val
A : Optional[int] = temp
self.set_position(__lowerCAmelCase , 0 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple ) -> int:
A : Optional[Any] = len(__lowerCAmelCase ) // 2 - 1
for i in range(__lowerCAmelCase , -1 , -1 ):
self.top_to_bottom(__lowerCAmelCase , __lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> str:
A : Union[str, Any] = positions[0]
A : Dict = sys.maxsize
self.top_to_bottom(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
return temp
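# Prim's algorithm: repeatedly extract the nearest vertex from the heap and relax the distances of its unvisited neighbours.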
def UpperCAmelCase ( _lowerCamelCase ):
A : List[Any] = Heap()
A : Any = [0] * len(UpperCAmelCase__ )
A : Dict = [-1] * len(UpperCAmelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A : int = [] # Heap of Distance of vertices from their neighboring vertex
A : Optional[int] = []
for vertex in range(len(UpperCAmelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCAmelCase__ )
heap.node_position.append(UpperCAmelCase__ )
A : str = []
A : List[str] = 1
A : Tuple = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A : Union[str, Any] = 0
A : Optional[int] = distance
heap.heapify(UpperCAmelCase__ , UpperCAmelCase__ )
for _ in range(1 , len(UpperCAmelCase__ ) ):
A : Dict = heap.delete_minimum(UpperCAmelCase__ , UpperCAmelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A : Dict = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCAmelCase__ )]
):
A : str = distance
heap.bottom_to_top(
UpperCAmelCase__ , heap.get_position(UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ )
A : Optional[int] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__SCREAMING_SNAKE_CASE = int(input("""Enter number of edges: """).strip())
__SCREAMING_SNAKE_CASE = defaultdict(list)
for _ in range(edges_number):
__SCREAMING_SNAKE_CASE = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 709 |
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__SCREAMING_SNAKE_CASE = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__SCREAMING_SNAKE_CASE = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]:
A : str = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
return {"recall": float(__lowerCamelCase ) if score.size == 1 else score} | 17 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__SCREAMING_SNAKE_CASE = """bart"""
__SCREAMING_SNAKE_CASE = True
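# Cached loaders: a RetriBERT question encoder for dense retrieval and a BART seq2seq model for answer generation.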
@st.cache(allow_output_mutation=_snake_case )
def UpperCAmelCase ( ):
if LOAD_DENSE_INDEX:
A : List[Any] = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
A : Dict = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
A : Optional[int] = qar_model.eval()
else:
A , A : str = (None, None)
if MODEL_TYPE == "bart":
A : Union[str, Any] = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
A : Any = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
A : List[Any] = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
A : str = sas_model.eval()
else:
A , A : str = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_snake_case )
def UpperCAmelCase ( ):
if LOAD_DENSE_INDEX:
A : Optional[int] = faiss.StandardGpuResources()
A : Tuple = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"]
A : Optional[Any] = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
A : Optional[int] = faiss.IndexFlatIP(128 )
A : str = faiss.index_cpu_to_gpu(_snake_case , 1 , _snake_case )
wikiaab_gpu_index_flat.add(_snake_case ) # TODO fix for larger GPU
else:
A , A : Optional[int] = (None, None)
A : Optional[Any] = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_snake_case )
def UpperCAmelCase ( ):
A : Optional[Any] = datasets.load_dataset("eli5" , name="LFQA_reddit" )
A : List[str] = elia["train_eli5"]
A : List[Any] = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) )
A : List[Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_snake_case )
return (elia_train, eli5_train_q_index)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_indexes()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_models()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_train_data()
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=10 ):
A : Dict = embed_questions_for_retrieval([question] , _snake_case , _snake_case )
A , A : List[str] = eli5_train_q_index.search(_snake_case , _snake_case )
A : Dict = [elia_train[int(_snake_case )] for i in I[0]]
return nn_examples
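# Gathers supporting passages from the dense wiki40b index or the sparse ElasticSearch index and builds the generator input.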
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ):
if source == "none":
A , A : Union[str, Any] = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
A , A : Optional[Any] = query_qa_dense_index(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
A , A : str = query_es_index(
_snake_case , _snake_case , index_name="english_wiki40b_snippets_100w" , n_results=_snake_case , )
A : List[Any] = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
A : List[str] = "question: {} context: {}".format(_snake_case , _snake_case )
return question_doc, support_list
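# Runs the seq2seq generator on the question plus support document; cached so repeated queries are cheap.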
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ):
with torch.no_grad():
A : Optional[int] = qa_sas_generate(
_snake_case , _snake_case , _snake_case , num_answers=1 , num_beams=_snake_case , min_len=_snake_case , max_len=_snake_case , do_sample=_snake_case , temp=_snake_case , top_p=_snake_case , top_k=_snake_case , max_input_length=1024 , device="cuda:0" , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
__SCREAMING_SNAKE_CASE = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
__SCREAMING_SNAKE_CASE = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__SCREAMING_SNAKE_CASE = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
__SCREAMING_SNAKE_CASE = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
__SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Demo options""")
if demo_options:
__SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
__SCREAMING_SNAKE_CASE = action_list.index(action_st)
__SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
__SCREAMING_SNAKE_CASE = show_type == """Show full text of passages"""
else:
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
__SCREAMING_SNAKE_CASE = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
__SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
__SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
__SCREAMING_SNAKE_CASE = """wiki40b"""
__SCREAMING_SNAKE_CASE = """dense"""
__SCREAMING_SNAKE_CASE = """beam"""
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 64
__SCREAMING_SNAKE_CASE = 256
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Generation options""")
if generate_options:
__SCREAMING_SNAKE_CASE = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
__SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
__SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__SCREAMING_SNAKE_CASE = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__SCREAMING_SNAKE_CASE = None
# start main text
__SCREAMING_SNAKE_CASE = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
__SCREAMING_SNAKE_CASE = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__SCREAMING_SNAKE_CASE = st.text_input("""Enter your question here:""", """""")
else:
__SCREAMING_SNAKE_CASE = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""dense""", n_results=10)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
__SCREAMING_SNAKE_CASE = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__SCREAMING_SNAKE_CASE = support_list[:10]
__SCREAMING_SNAKE_CASE = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
__SCREAMING_SNAKE_CASE = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
__SCREAMING_SNAKE_CASE = res[1].strip()
if sec_titles == "":
__SCREAMING_SNAKE_CASE = """[{}]({})""".format(res[0], wiki_url)
else:
__SCREAMING_SNAKE_CASE = sec_titles.split(""" & """)
__SCREAMING_SNAKE_CASE = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
__SCREAMING_SNAKE_CASE = find_nearest_training(question)
__SCREAMING_SNAKE_CASE = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
__SCREAMING_SNAKE_CASE = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
__SCREAMING_SNAKE_CASE = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True) | 710 |
from collections import deque
from .hash_table import HashTable
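# Hash table that resolves collisions by chaining the values of a slot in a deque.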
class lowerCamelCase_ ( HashTable ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
A : Dict = self.values[key]
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase ) | 17 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
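# Checks that the framework-agnostic tensor helpers (transpose, reshape, squeeze, expand_dims) match NumPy for Torch, TF and JAX inputs.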
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
A : str = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_28, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_42, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
A : List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_28,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_42,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(UpperCamelCase__ ) , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , x.transpose() ) )
A : str = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : Tuple = np.random.randn(3 , 4 )
A : str = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , transpose(UpperCamelCase__ ).numpy() ) )
A : Optional[int] = np.random.randn(3 , 4 , 5 )
A : Optional[int] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , transpose(UpperCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
A : List[Any] = np.random.randn(3 , 4 )
A : Union[str, Any] = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , transpose(UpperCamelCase__ ).numpy() ) )
A : Any = np.random.randn(3 , 4 , 5 )
A : Dict = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , transpose(UpperCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
A : List[str] = np.random.randn(3 , 4 )
A : List[str] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , np.asarray(transpose(UpperCamelCase__ ) ) ) )
A : Any = np.random.randn(3 , 4 , 5 )
A : Any = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) ) ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , np.reshape(UpperCamelCase__ , (4, 3) ) ) )
A : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , np.reshape(UpperCamelCase__ , (12, 5) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
A : Dict = np.random.randn(3 , 4 )
A : Union[str, Any] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , reshape(UpperCamelCase__ , (4, 3) ).numpy() ) )
A : int = np.random.randn(3 , 4 , 5 )
A : Optional[int] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , reshape(UpperCamelCase__ , (12, 5) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
A : Optional[int] = np.random.randn(3 , 4 )
A : Optional[int] = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , reshape(UpperCamelCase__ , (4, 3) ).numpy() ) )
A : Any = np.random.randn(3 , 4 , 5 )
A : Optional[int] = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , reshape(UpperCamelCase__ , (12, 5) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
A : Optional[int] = np.random.randn(3 , 4 )
A : Union[str, Any] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , np.asarray(reshape(UpperCamelCase__ , (4, 3) ) ) ) )
A : int = np.random.randn(3 , 4 , 5 )
A : List[str] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , np.asarray(reshape(UpperCamelCase__ , (12, 5) ) ) ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Dict = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , np.squeeze(UpperCamelCase__ ) ) )
A : Dict = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , np.squeeze(UpperCamelCase__ , axis=2 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : List[Any] = np.random.randn(1 , 3 , 4 )
A : List[str] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , squeeze(UpperCamelCase__ ).numpy() ) )
A : Dict = np.random.randn(1 , 4 , 1 , 5 )
A : Optional[int] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , squeeze(UpperCamelCase__ , axis=2 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
A : int = np.random.randn(1 , 3 , 4 )
A : Dict = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , squeeze(UpperCamelCase__ ).numpy() ) )
A : List[Any] = np.random.randn(1 , 4 , 1 , 5 )
A : Optional[Any] = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , squeeze(UpperCamelCase__ , axis=2 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : int = np.random.randn(1 , 3 , 4 )
A : str = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , np.asarray(squeeze(UpperCamelCase__ ) ) ) )
A : str = np.random.randn(1 , 4 , 1 , 5 )
A : Optional[int] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , np.asarray(squeeze(UpperCamelCase__ , axis=2 ) ) ) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
A : Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , np.expand_dims(UpperCamelCase__ , axis=1 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : List[Any] = np.random.randn(3 , 4 )
A : Optional[Any] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , expand_dims(UpperCamelCase__ , axis=1 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[Any]:
A : int = np.random.randn(3 , 4 )
A : str = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , expand_dims(UpperCamelCase__ , axis=1 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
A : Union[str, Any] = np.random.randn(3 , 4 )
A : Optional[int] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , np.asarray(expand_dims(UpperCamelCase__ , axis=1 ) ) ) ) | 711 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
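# Shared mixin: builds dummy hidden states / time embeddings and checks output shapes and backward passes of UNet blocks.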
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict:
A : Optional[Any] = 4
A : List[str] = 32
A : Any = (32, 32)
A : str = torch.manual_seed(0 )
A : int = torch.device(__lowerCamelCase )
A : List[str] = (batch_size, num_channels) + sizes
A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase )
A : int = {"hidden_states": hidden_states}
if include_temb:
A : Any = 1_28
A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase )
if include_res_hidden_states_tuple:
A : str = torch.manual_seed(1 )
A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),)
if include_encoder_hidden_states:
A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase )
if include_skip_sample:
A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : Dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 1_28,
}
if self.block_type == "up":
A : Dict = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A : str = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A , A : str = self.prepare_init_args_and_inputs_for_common()
A : List[Any] = self.block_class(**__lowerCamelCase )
unet_block.to(__lowerCamelCase )
unet_block.eval()
with torch.no_grad():
A : int = unet_block(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Union[str, Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
A : Any = output[0, -1, -3:, -3:]
A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase )
assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.prepare_init_args_and_inputs_for_common()
A : str = self.block_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Optional[int] = model(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Optional[Any] = output[0]
A : List[str] = torch.device(__lowerCamelCase )
A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase )
A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase )
loss.backward() | 17 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
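# Import the real pipelines only when torch and transformers are available; otherwise fall back to dummy placeholder objects.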
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 712 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]:
A : str = max_length
A : Optional[int] = max_position_embeddings
@add_start_docstrings(__lowerCamelCase )
def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool:
A : List[Any] = input_ids.shape[-1]
A : Any = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]:
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , __lowerCamelCase , )
A : str = start_length
A : Optional[Any] = max_new_tokens
A : Dict = start_length + max_new_tokens
@add_start_docstrings(__lowerCamelCase )
def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return input_ids.shape[-1] >= self.max_length
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]:
A : str = max_time
A : Dict = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool:
return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
return None
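# Ensures the criteria list carries a max-length criterion consistent with the separate max_length argument.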
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[int] = stopping_criteria.max_length
A : Any = deepcopy(_lowerCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) )
return new_stopping_criteria | 17 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class lowerCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
a__ = 'codegen'
a__ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[Any] , __lowerCamelCase : Union[str, Any]=5_04_00 , __lowerCamelCase : Tuple=20_48 , __lowerCamelCase : Dict=20_48 , __lowerCamelCase : List[Any]=40_96 , __lowerCamelCase : str=28 , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : Tuple=64 , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]="gelu_new" , __lowerCamelCase : int=0.0 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : int=0.0 , __lowerCamelCase : int=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[str]=5_02_56 , __lowerCamelCase : Dict=5_02_56 , __lowerCamelCase : int=False , **__lowerCamelCase : str , ) -> str:
A : Optional[Any] = vocab_size
A : Any = n_ctx
A : str = n_positions
A : Any = n_embd
A : List[Any] = n_layer
A : Tuple = n_head
A : List[Any] = n_inner
A : List[Any] = rotary_dim
A : Tuple = activation_function
A : Any = resid_pdrop
A : List[str] = embd_pdrop
A : List[Any] = attn_pdrop
A : Optional[Any] = layer_norm_epsilon
A : Optional[Any] = initializer_range
A : Tuple = use_cache
A : Any = bos_token_id
A : str = eos_token_id
super().__init__(
bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase )
class lowerCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : PretrainedConfig , __lowerCamelCase : str = "default" , __lowerCamelCase : List[PatchingSpec] = None , __lowerCamelCase : bool = False , ) -> Optional[Any]:
super().__init__(__lowerCamelCase , task=__lowerCamelCase , patching_specs=__lowerCamelCase , use_past=__lowerCamelCase )
if not getattr(self._config , "pad_token_id" , __lowerCamelCase ):
# TODO: how to do that better?
A : Dict = 0
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
A : Optional[int] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
A : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
A : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
return self._config.n_head
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
A : Union[str, Any] = super(__lowerCamelCase , self ).generate_dummy_inputs(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
        # We need to order the inputs in the way they appear in the forward()
A : int = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A : Dict = seqlen + 2
A : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A : Any = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers )
]
A : Dict = common_inputs["""attention_mask"""]
if self.use_past:
A : int = ordered_inputs["""attention_mask"""].dtype
A : Any = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
        return 13
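
# Note appended for clarity (not in the original file): each `past_key_values` entry built in
# `generate_dummy_inputs` above is a (key, value) pair of zero tensors with shape
# (batch, num_attention_heads, sequence_length + 2, hidden_size // num_attention_heads),
# one pair per decoder layer.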
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ):
A : str = symbols(_lowerCamelCase )
A : int = lambdify(_lowerCamelCase , _lowerCamelCase )
A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) )
A : Optional[int] = starting_point
while True:
if diff_function(_lowerCamelCase ) != 0:
A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function(
_lowerCamelCase )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
A : int = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""") | 17 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def UpperCAmelCase ( _lowerCamelCase ):
if "://" in dataset_path:
A : Any = dataset_path.split("://" )[1]
return dataset_path
def UpperCAmelCase ( _lowerCamelCase ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : int = not is_remote_filesystem(SCREAMING_SNAKE_CASE_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(SCREAMING_SNAKE_CASE_ ) , fs._strip_protocol(SCREAMING_SNAKE_CASE_ ) )
else:
fs.mv(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , recursive=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( ):
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
A : Optional[int] = None
A : List[str] = None
        A : Union[str, Any] = threading.Lock()
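

# Illustrative sketch (appended; the helper name is hypothetical and not part of the original
# module): the path-extraction logic above simply drops everything up to and including the
# "://" protocol separator, leaving local paths untouched.
def _strip_protocol_prefix(dataset_path: str) -> str:
    return dataset_path.split("://")[1] if "://" in dataset_path else dataset_path


# _strip_protocol_prefix("s3://my-bucket/datasets/train")  -> "my-bucket/datasets/train"
# _strip_protocol_prefix("/local/path/train")              -> "/local/path/train"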
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16384,
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = LEDTokenizer
a__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]:
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
A : Any = add_prefix_space
A : Tuple = pre_tok_class(**__lowerCamelCase )
A : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A : List[str] = "post_processor"
A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
A : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A : Union[str, Any] = tuple(state["sep"] )
if "cls" in state:
A : str = tuple(state["cls"] )
A : int = False
if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : List[Any] = add_prefix_space
A : Dict = True
if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets:
A : Dict = trim_offsets
A : str = True
if changes_to_apply:
A : int = getattr(__lowerCamelCase , state.pop("type" ) )
A : Dict = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict:
A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value
A : Tuple = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]:
A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
A : str = [self.sep_token_id]
A : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict:
A : Dict = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
A : List[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A : Optional[int] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
A : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
'''simple docstring'''
import math
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
return math.pow(_lowerCAmelCase , 2 ) - a
def UpperCAmelCase ( _lowerCamelCase ):
return 2 * x
def UpperCAmelCase ( _lowerCamelCase ):
A : Dict = 2.0
while start <= a:
A : Dict = math.pow(_lowerCAmelCase , 2 )
return start
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = 9999 , _lowerCamelCase = 0.00_00_00_00_00_00_01 ):
if a < 0:
raise ValueError("math domain error" )
A : str = get_initial_point(_lowerCAmelCase )
for _ in range(_lowerCAmelCase ):
A : List[str] = value
A : Optional[int] = value - fx(_lowerCAmelCase , _lowerCAmelCase ) / fx_derivative(_lowerCAmelCase )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
    testmod()
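

# Illustrative sketch (appended; not part of the original file, helper name hypothetical):
# the square-root search above is Newton's iteration on f(x) = x**2 - a, i.e.
# x_{n+1} = x_n - (x_n**2 - a) / (2 * x_n), assuming a >= 0.
def _sqrt_newton(a: float, iterations: int = 100) -> float:
    x = max(a, 1.0)  # any starting point >= sqrt(a) converges monotonically
    for _ in range(iterations):
        x = x - (x * x - a) / (2 * x)
    return x


# _sqrt_newton(49.0)  -> 7.0 (up to floating point error)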
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_A )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} )
a__ = Features({"question": Value("string" ), "context": Value("string" )} )
a__ = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
a__ = "question"
a__ = "context"
a__ = "answers"
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
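

# Illustrative sketch (appended; not part of the original file): a record matching the
# input/label schema declared by this task template would look roughly like this.
_example_record = {
    "question": "Where do giant pandas live?",
    "context": "Giant pandas live in the mountain ranges of central China.",
    "answers": {"text": ["the mountain ranges of central China"], "answer_start": [21]},
}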
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[Any] = checkpoints.load_tax_checkpoint(_lowerCamelCase )
A : int = flatten_dict(_lowerCamelCase )
return flax_params
def UpperCAmelCase ( _lowerCamelCase ):
A : List[str] = {}
A : Dict = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
A : Optional[int] = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
A : Optional[int] = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
A : int = new_key.replace(_lowerCamelCase , _lowerCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
A : Any = new_key.replace(_lowerCamelCase , _lowerCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
A : List[str] = re.sub(R"layers_(\d+)" , R"layer.\1" , _lowerCamelCase )
A : Dict = new_key.replace("encoder" , "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
A : List[str] = re.sub(R"layers_(\d+)" , R"layer.\1" , _lowerCamelCase )
A : Tuple = flax_dict[key]
A : Optional[Any] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
A : Any = torch.from_numpy(converted_dict[key].T )
else:
A : int = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False ):
A : Optional[int] = get_flax_param(_lowerCamelCase )
if not use_large:
A : Dict = PixaStructVisionConfig()
A : List[Any] = PixaStructTextConfig()
else:
A : Optional[Any] = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
A : List[Any] = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
A : List[str] = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_lowerCamelCase )
A : Optional[int] = PixaStructForConditionalGeneration(_lowerCamelCase )
A : List[str] = rename_and_convert_flax_params(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
A : str = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
A : Tuple = PixaStructImageProcessor()
A : int = PixaStructProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
if use_large:
A : int = 4096
A : Tuple = True
# mkdir if needed
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
print("Model saved in {}".format(_lowerCamelCase ) )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int:
A : Optional[int] = parent
A : List[str] = batch_size
A : Tuple = image_size
A : List[str] = num_channels
A : List[str] = embeddings_size
A : List[str] = hidden_sizes
A : str = depths
A : Optional[Any] = is_training
A : int = use_labels
A : Optional[int] = hidden_act
A : List[Any] = num_labels
A : List[str] = scope
A : str = len(__lowerCamelCase )
A : Optional[int] = out_features
A : str = out_indices
A : Optional[int] = num_groups
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[int] = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
A : Any = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple:
A : Union[str, Any] = self.num_labels
A : List[str] = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : str = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> List[Any]:
A : Dict = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[Any] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A : Optional[Any] = None
A : Optional[int] = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
A : List[str] = self.prepare_config_and_inputs()
A , A , A : Tuple = config_and_inputs
A : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : Any = BitModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason="Bit does not output attentions" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
A : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
A : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : Union[str, Any] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
        A : Union[str, Any] = BitModelTester(self )
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
a__ = 42
a__ = 42
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
a__ = 42
a__ = (16, 32, 96, 256)
a__ = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A : Tuple = []
for i in range(len(self.block_out_channels ) - 1 ):
A : Union[str, Any] = self.block_out_channels[i]
A : List[Any] = self.block_out_channels[i + 1]
A : Optional[int] = nn.Conv(
lowerCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCamelCase__ )
A : Tuple = nn.Conv(
lowerCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCamelCase__ )
A : Optional[int] = blocks
A : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Optional[Any] , __lowerCamelCase : Tuple ) -> int:
A : Union[str, Any] = self.conv_in(lowerCamelCase__ )
A : Optional[Any] = nn.silu(lowerCamelCase__ )
for block in self.blocks:
A : str = block(lowerCamelCase__ )
A : Union[str, Any] = nn.silu(lowerCamelCase__ )
A : Union[str, Any] = self.conv_out(lowerCamelCase__ )
return embedding
@flax_register_to_config
class lowerCamelCase_ ( nn.Module ,UpperCAmelCase__ ,UpperCAmelCase__ ):
'''simple docstring'''
a__ = 32
a__ = 4
a__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a__ = False
a__ = (320, 640, 1280, 1280)
a__ = 2
a__ = 8
a__ = None
a__ = 1280
a__ = 0.0
a__ = False
a__ = jnp.floataa
a__ = True
a__ = 0
a__ = "rgb"
a__ = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : jax.random.KeyArray ) -> FrozenDict:
A : str = (1, self.in_channels, self.sample_size, self.sample_size)
A : Optional[Any] = jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa )
A : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
A : Any = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A : Optional[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
A : int = jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa )
A : List[Any] = jax.random.split(lowerCamelCase__ )
A : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng}
return self.init(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )["params"]
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : str = self.block_out_channels
A : List[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A : Union[str, Any] = self.num_attention_heads or self.attention_head_dim
# input
A : Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A : Optional[Any] = FlaxTimestepEmbedding(lowerCamelCase__ , dtype=self.dtype )
A : Union[str, Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
A : int = self.only_cross_attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A : Optional[int] = (num_attention_heads,) * len(self.down_block_types )
# down
A : Dict = []
A : List[str] = []
A : Dict = block_out_channels[0]
A : List[Any] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
for i, down_block_type in enumerate(self.down_block_types ):
A : Dict = output_channel
A : Optional[Any] = block_out_channels[i]
A : int = i == len(lowerCamelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
A : Union[str, Any] = FlaxDownBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCamelCase__ )
for _ in range(self.layers_per_block ):
A : Dict = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
if not is_final_block:
A : List[Any] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
A : List[str] = down_blocks
A : List[Any] = controlnet_down_blocks
# mid
A : Tuple = block_out_channels[-1]
A : str = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
A : Optional[int] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : float = 1.0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
A : List[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
A : Optional[Any] = jnp.flip(lowerCamelCase__ , axis=1 )
# 1. time
if not isinstance(lowerCamelCase__ , jnp.ndarray ):
A : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
A : List[str] = timesteps.astype(dtype=jnp.floataa )
A : Optional[int] = jnp.expand_dims(lowerCamelCase__ , 0 )
A : Any = self.time_proj(lowerCamelCase__ )
A : Optional[Any] = self.time_embedding(lowerCamelCase__ )
# 2. pre-process
A : List[Any] = jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) )
A : Dict = self.conv_in(lowerCamelCase__ )
A : List[Any] = jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) )
A : Optional[Any] = self.controlnet_cond_embedding(lowerCamelCase__ )
sample += controlnet_cond
# 3. down
A : int = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A : str = down_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
else:
A : List[str] = down_block(lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
A : Optional[int] = self.mid_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
        # 5. controlnet blocks
A : List[str] = ()
for down_block_res_sample, controlnet_block in zip(lowerCamelCase__ , self.controlnet_down_blocks ):
A : Dict = controlnet_block(lowerCamelCase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
A : Dict = controlnet_down_block_res_samples
A : Dict = self.controlnet_mid_block(lowerCamelCase__ )
# 6. scaling
A : int = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase__ , mid_block_res_sample=lowerCamelCase__ )
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
A : List[str] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
A : List[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
def UpperCAmelCase ( _lowerCamelCase ):
A : Union[str, Any] = 0
A : Tuple = len(lowercase__ )
for i in range(n - 1 ):
for j in range(i + 1 , lowercase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def UpperCAmelCase ( _lowerCamelCase ):
if len(lowercase__ ) <= 1:
return arr, 0
A : Optional[Any] = len(lowercase__ ) // 2
A : int = arr[0:mid]
A : Dict = arr[mid:]
A , A : Dict = count_inversions_recursive(lowercase__ )
A , A : Tuple = count_inversions_recursive(lowercase__ )
A , A : Union[str, Any] = _count_cross_inversions(lowercase__ , lowercase__ )
A : Optional[int] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : int = []
A : Optional[int] = 0
while i < len(lowercase__ ) and j < len(lowercase__ ):
if p[i] > q[j]:
            # If p[i] > q[j], then p[k] > q[j] for every i <= k < len(p):
            # each of those pairs is an inversion. The claim follows from
            # the fact that p is sorted.
num_inversion += len(lowercase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowercase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def UpperCAmelCase ( ):
A : Optional[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A : str = count_inversions_bf(lowercase__ )
A , A : List[str] = count_inversions_recursive(lowercase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , lowercase__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A : str = count_inversions_bf(lowercase__ )
A , A : Tuple = count_inversions_recursive(lowercase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowercase__ )
# an empty list should also have zero inversions
A : List[Any] = []
A : int = count_inversions_bf(lowercase__ )
A , A : Tuple = count_inversions_recursive(lowercase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowercase__ )
if __name__ == "__main__":
    main()
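
# Note appended for clarity (not in the original file): count_inversions_bf is the O(n^2)
# brute-force reference, while the recursive merge-based version runs in O(n log n); the
# driver above checks that both report the same counts.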
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str:
A : Optional[Any] = parent
A : Optional[int] = batch_size
A : List[str] = image_size
A : List[str] = num_channels
A : Tuple = embeddings_size
A : Optional[int] = hidden_sizes
A : Dict = depths
A : Optional[int] = is_training
A : List[str] = use_labels
A : List[Any] = hidden_act
A : Optional[int] = num_labels
A : int = scope
A : List[Any] = len(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : Any = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
A : List[str] = TFRegNetModel(config=__lowerCamelCase )
A : str = model(__lowerCamelCase , training=__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]:
A : List[Any] = self.num_labels
A : int = TFRegNetForImageClassification(__lowerCamelCase )
A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
A : Any = self.prepare_config_and_inputs()
A , A , A : str = config_and_inputs
A : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a__ = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Optional[Any] = TFRegNetModelTester(self )
A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
A : int = model_class(__lowerCamelCase )
A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase )
A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : List[str] = layer_type
A : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ):
A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase )
A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple()
def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ):
if isinstance(__lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ):
recursive_check(__lowerCamelCase , __lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase )
for model_class in self.all_model_classes:
A : Tuple = model_class(__lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A : Optional[int] = self.default_image_processor
A : List[Any] = prepare_img()
A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# forward pass
A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase )
# verify the logits
A : Dict = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = (PNDMScheduler,)
a__ = (("num_inference_steps", 50),)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]:
A : Union[str, Any] = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__lowerCamelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple:
A : Dict = dict(self.forward_default_kwargs )
A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : Union[str, Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Any = self.get_scheduler_config(**__lowerCamelCase )
A : int = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : Dict = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str:
A : List[str] = dict(self.forward_default_kwargs )
A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : List[str] = self.dummy_sample
A : Any = 0.1 * sample
A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Tuple = self.get_scheduler_config()
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A : Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : str = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[:]
A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]:
A : Optional[Any] = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(**__lowerCamelCase )
A : str = scheduler_class(**__lowerCamelCase )
A : List[str] = 10
A : Union[str, Any] = self.dummy_model()
A : int = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A : Tuple = model(__lowerCamelCase , __lowerCamelCase )
A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
A : Union[str, Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
A : List[Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__lowerCamelCase )
elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ):
A : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
A : str = 27
for scheduler_class in self.scheduler_classes:
A : Tuple = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
with self.assertRaises(__lowerCamelCase ):
A : Union[str, Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : Optional[Any] = self.full_loop()
A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
A : Any = self.full_loop(prediction_type="v_prediction" )
A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
A : Dict = torch.sum(torch.abs(__lowerCamelCase ) )
A : Any = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
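# --- Illustrative usage sketch (added; not part of the original test file) ---
# A minimal PNDM denoising loop mirroring what full_loop() above exercises. The tiny
# "model" below is a stand-in assumption; a real pipeline would pass a UNet's predicted
# noise to scheduler.step() instead.
import torch
from diffusers import PNDMScheduler


def _toy_model(sample, t):  # placeholder for a trained UNet (assumption)
    return 0.1 * sample


example_scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
example_scheduler.set_timesteps(10)
example_sample = torch.randn(1, 3, 8, 8)
for t in example_scheduler.timesteps:
    residual = _toy_model(example_sample, t)
    example_sample = example_scheduler.step(residual, t, example_sample).prev_sample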
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCamelCase_ ( PreTrainedModel , BackboneMixin ):
'''simple docstring'''
a__ = "pixel_values"
a__ = False
a__ = TimmBackboneConfig
def __init__( self : Optional[Any] , __lowerCamelCase : Dict , **__lowerCamelCase : Any ) -> List[Any]:
requires_backends(self , "timm" )
super().__init__(UpperCAmelCase__ )
A : Optional[int] = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(UpperCAmelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
A : List[Any] = getattr(UpperCAmelCase__ , "use_pretrained_backbone" , UpperCAmelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
A : Dict = config.out_indices if getattr(UpperCAmelCase__ , "out_indices" , UpperCAmelCase__ ) is not None else (-1,)
A : Optional[int] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase__ , **UpperCAmelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
A : List[Any] = self._backbone.return_layers
A : Optional[int] = {layer["module"]: str(UpperCAmelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
A : Dict = kwargs.pop("config" , TimmBackboneConfig() )
A : Any = kwargs.pop("use_timm_backbone" , UpperCAmelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
A : List[str] = kwargs.pop("num_channels" , config.num_channels )
A : str = kwargs.pop("features_only" , config.features_only )
A : Optional[int] = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
A : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
A : List[Any] = TimmBackboneConfig(
backbone=UpperCAmelCase__ , num_channels=UpperCAmelCase__ , features_only=UpperCAmelCase__ , use_pretrained_backbone=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , )
return super()._from_config(UpperCAmelCase__ , **UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=None , **__lowerCamelCase : List[str] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
A : int = return_dict if return_dict is not None else self.config.use_return_dict
A : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A : Optional[int] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
A : Any = self._all_layers
A : str = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__ )
A : Union[str, Any] = self._return_layers
A : Dict = tuple(hidden_states[i] for i in self.out_indices )
else:
A : Dict = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__ )
A : Dict = None
A : List[str] = tuple(UpperCAmelCase__ )
A : Optional[Any] = tuple(UpperCAmelCase__ ) if hidden_states is not None else None
if not return_dict:
A : Dict = (feature_maps,)
if output_hidden_states:
A : Any = output + (hidden_states,)
return output
        return BackboneOutput(feature_maps=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ , attentions=UpperCAmelCase__ )
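# --- Illustrative usage sketch (added; not part of the original modeling file) ---
# How a timm-backed backbone of the kind defined above is typically built and queried for
# feature maps. The backbone name, out_indices and input size are assumptions for
# illustration; instantiating it requires `timm` and downloads pretrained weights.
import torch
from transformers import TimmBackbone, TimmBackboneConfig

example_config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
example_backbone = TimmBackbone(example_config)
example_output = example_backbone(torch.randn(1, 3, 224, 224))
print([tuple(feature_map.shape) for feature_map in example_output.feature_maps])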
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1
def UpperCAmelCase ( force , area , distance ):
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
        distance = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
    doctest.testmod()
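# --- Illustrative example (added; not part of the original file) ---
# Calling the Casimir helper defined above with exactly one unknown (set to 0). The plate
# area of 4 cm^2 and the 1 micrometre separation are arbitrary illustration values.
print(UpperCAmelCase(0, 4e-4, 1e-6))  # force is the unknown -> {'force': ...} in newtons
print(UpperCAmelCase(5e-7, 4e-4, 0))  # distance is the unknown -> {'distance': ...} in metres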
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
a__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
a__ = field(
default=__a ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a__ = field(
default=__a ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a__ = field(
default=__a ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
a__ = field(default=__a ,metadata={"help": "Whether tp freeze the encoder."} )
a__ = field(default=__a ,metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
a__ = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
a__ = field(
default="summarization" ,metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} ,)
a__ = field(
default=1024 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
a__ = field(
default=128 ,metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
a__ = field(
default=142 ,metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} ,)
a__ = field(
default=142 ,metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
a__ = field(default=-1 ,metadata={"help": "# training examples. -1 means use all."} )
a__ = field(default=-1 ,metadata={"help": "# validation examples. -1 means use all."} )
a__ = field(default=-1 ,metadata={"help": "# test examples. -1 means use all."} )
a__ = field(default=__a ,metadata={"help": "Source language id for translation."} )
a__ = field(default=__a ,metadata={"help": "Target language id for translation."} )
a__ = field(default=__a ,metadata={"help": "# num_beams to use for evaluation."} )
a__ = field(
default=__a ,metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} ,)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(_lowerCamelCase , os.path.join(_lowerCamelCase , f"""{split}_results.json""" ) )
def UpperCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A : int = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
assert hasattr(_lowerCamelCase , _lowerCamelCase ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_lowerCamelCase , _lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCamelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
A : Union[str, Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCamelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : int = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
A : Optional[int] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCamelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
A : List[Any] = SeqaSeqDataset
# Get datasets
A : Any = (
dataset_class(
_lowerCamelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
A : str = (
dataset_class(
_lowerCamelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
A : Union[str, Any] = (
dataset_class(
_lowerCamelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
A : Any = (
build_compute_metrics_fn(data_args.task , _lowerCamelCase ) if training_args.predict_with_generate else None
)
A : List[Any] = SeqaSeqTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , data_args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , data_collator=SeqaSeqDataCollator(
_lowerCamelCase , _lowerCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , )
A : Dict = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
A : List[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
A : List[str] = train_result.metrics
A : Union[str, Any] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCamelCase , training_args.output_dir )
all_metrics.update(_lowerCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A : List[Any] = trainer.evaluate(metric_key_prefix="val" )
A : Optional[int] = data_args.n_val
A : List[str] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCamelCase , training_args.output_dir )
all_metrics.update(_lowerCamelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
A : str = trainer.predict(test_dataset=_lowerCamelCase , metric_key_prefix="test" )
A : List[Any] = test_output.metrics
A : str = data_args.n_test
if trainer.is_world_process_zero():
A : List[str] = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCamelCase , training_args.output_dir )
all_metrics.update(_lowerCamelCase )
if training_args.predict_with_generate:
A : List[Any] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
A : List[str] = lmap(str.strip , _lowerCamelCase )
write_txt_file(_lowerCamelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCamelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def UpperCAmelCase ( _lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
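# --- Illustrative invocation (comments only; added, not part of the original script) ---
# A sketch of how this training script is typically launched. The flag names follow the
# dataclasses above plus Seq2SeqTrainingArguments; the script name, paths and data layout
# (train.source / train.target files inside --data_dir) are assumptions for illustration.
#
#   python finetune_trainer.py \
#     --model_name_or_path sshleifer/student_marian_en_ro_6_1 \
#     --data_dir ./wmt_en_ro \
#     --output_dir ./marian_en_ro_finetuned \
#     --task translation \
#     --do_train --do_eval \
#     --max_source_length 128 --max_target_length 128 \
#     --predict_with_generate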
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( checkpoint_repo , pytorch_dump_folder_path ):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=["RobertaPreLayerNormForMaskedLM"] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename="pytorch_model.bin" ) )
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta." ):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
            continue
        new_state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo , config=config , state_dict=new_state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
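# --- Illustrative invocation (comments only; added, not part of the original script) ---
# Both flags are declared by the argparse block above; the checkpoint id comes from the
# --checkpoint-repo help text, while the script file name and output directory are placeholders.
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta_prelayernorm_converted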
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # tokenizers is available, but there is no extra fast tokenizer to import for RoCBert
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300 # TEMPERATURE (unit = K)
def UpperCAmelCase ( donor_conc , acceptor_conc , intrinsic_conc , ):
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
    doctest.testmod()
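# --- Illustrative example (added; not part of the original file) ---
# Calling the built-in-voltage helper defined above with typical silicon pn-junction numbers
# (assumed for illustration): Nd = Na = 1e17 cm^-3 and ni = 1.5e10 cm^-3 give roughly 0.8 V
# at T = 300 K.
print(UpperCAmelCase(1e17, 1e17, 1.5e10))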
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for a, b in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Union[str, Any] = None
ops.enable_eager_execution_internal()
A : Tuple = tf.config.list_physical_devices("CPU" )
if len(__lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
A : Dict = tf.config.list_logical_devices(device_type="CPU" )
A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
A : Optional[int] = GradientAccumulator()
A : Tuple = tf.Variable([4.0, 3.0] )
A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 )
A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase )
def accumulate_on_replica(__lowerCamelCase : Tuple ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
with strategy.scope():
A : int = strategy.experimental_local_results(__lowerCamelCase )
local_variables[0].assign(__lowerCamelCase )
local_variables[1].assign(__lowerCamelCase )
strategy.run(__lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowerCamelCase )
def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
        _check_local_values([0.0, 0.0] , [0.0, 0.0] )
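# --- Illustrative usage sketch (added; not part of the original test file) ---
# A minimal gradient-accumulation step outside of unittest: gradients from two micro-batches
# are summed in the accumulator and applied once. The variable shape, gradient values and
# learning-rate schedule are assumptions for illustration.
import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

example_variable = tf.Variable([1.0, 2.0])
example_optimizer, _ = create_optimizer(init_lr=5e-5, num_train_steps=10, num_warmup_steps=2)
example_accumulator = GradientAccumulator()
for micro_batch_gradient in ([0.5, 0.5], [1.5, -0.5]):
    example_accumulator([tf.constant(micro_batch_gradient)])
example_optimizer.apply_gradients(list(zip(example_accumulator.gradients, [example_variable])))
example_accumulator.reset()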
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__SCREAMING_SNAKE_CASE = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
A : str = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
A : Any = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
A : Dict = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
A : int = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
A : Tuple = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
A : Optional[Any] = text_classifier("This is great !" , return_all_scores=lowercase__ )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
A : List[Any] = text_classifier("This is great !" , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
A : Any = text_classifier(["This is great !", "Something else"] , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
A : Optional[int] = text_classifier(["This is great !", "Something else"] , return_all_scores=lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
import torch
A : Optional[int] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
A : Optional[Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : str = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
A : Optional[int] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
A : Tuple = pipeline("text-classification" )
A : str = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "POSITIVE", "score": 1.0}] )
A : List[Any] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "NEGATIVE", "score": 1.0}] )
A : List[str] = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
A : int = pipeline("text-classification" , framework="tf" )
A : Optional[int] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "POSITIVE", "score": 1.0}] )
A : List[Any] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "NEGATIVE", "score": 1.0}] )
A : Optional[int] = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": "POSITIVE", "score": 0.988}] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> Optional[Any]:
A : int = TextClassificationPipeline(model=lowercase__ , tokenizer=lowercase__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ) -> List[Any]:
A : Dict = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
A : Union[str, Any] = """HuggingFace is in"""
A : str = text_classifier(lowercase__ )
self.assertEqual(nested_simplify(lowercase__ ) , [{"label": ANY(lowercase__ ), "score": ANY(lowercase__ )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
A : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
A : List[str] = text_classifier(lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"label": ANY(lowercase__ ), "score": ANY(lowercase__ )}, {"label": ANY(lowercase__ ), "score": ANY(lowercase__ )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
A : List[Any] = text_classifier(lowercase__ , top_k=lowercase__ )
A : int = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowercase__ ) , [[{"label": ANY(lowercase__ ), "score": ANY(lowercase__ )}] * N, [{"label": ANY(lowercase__ ), "score": ANY(lowercase__ )}] * N] , )
A : List[str] = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
A : Optional[int] = text_classifier(lowercase__ )
self.assertEqual(
nested_simplify(lowercase__ ) , {"label": ANY(lowercase__ ), "score": ANY(lowercase__ )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
A : Optional[int] = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(lowercase__ ):
text_classifier(lowercase__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
A : Union[str, Any] = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(lowercase__ ) , [{"label": ANY(lowercase__ ), "score": ANY(lowercase__ )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
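# --- Illustrative example (added; not part of the original test file) ---
# The same text-classification pipeline exercised above, used directly; the tiny internal
# test checkpoint keeps the download small (its scores are meaningless by design).
from transformers import pipeline

example_classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(example_classifier("This is great !", top_k=2))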
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__SCREAMING_SNAKE_CASE = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions ( ):
A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json"
A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys()
return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) )
def init_hf_modules ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
A : List[Any] = Path(_lowerCamelCase ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def create_dynamic_module ( _lowerCamelCase ):
init_hf_modules()
A : Tuple = Path(_lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
A : Optional[int] = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def get_relative_imports ( _lowerCamelCase ):
    with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$" , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import" , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[int] = False
A : Tuple = [module_file]
A : Optional[int] = []
# Let's recurse through all relative imports
while not no_change:
A : Optional[Any] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowerCamelCase ) )
A : Optional[Any] = Path(_lowerCamelCase ).parent
A : List[str] = [str(module_path / m ) for m in new_imports]
A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports]
A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files]
A : Tuple = len(_lowerCamelCase ) == 0
all_relative_imports.extend(_lowerCamelCase )
return all_relative_imports
def check_imports ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Dict = f.read()
# Imports of the form `import xxx`
A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
A : Any = list(set(_lowerCamelCase ) )
A : Tuple = []
for imp in imports:
try:
importlib.import_module(_lowerCamelCase )
except ImportError:
missing_packages.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" )
return get_relative_imports(_lowerCamelCase )
def get_class_in_module ( class_name , module_path ):
    module_path = module_path.replace(os.path.sep , "." )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class ( _lowerCamelCase ):
from ..pipelines import DiffusionPipeline
A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) )
A : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowerCamelCase )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
A : Any = cls
return pipeline_class
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ):
A : List[Any] = str(_lowerCamelCase )
A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
A : Union[str, Any] = module_file_or_url
A : Any = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
A : Optional[Any] = get_diffusers_versions()
# cut ".dev0"
A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
A : List[Any] = latest_version if latest_version[1:] in available_versions else "main"
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
A : Optional[Any] = f"""v{revision}"""
elif revision == "main":
A : Dict = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
A : Optional[int] = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = "git"
A : Any = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
A : Any = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
A : List[str] = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
A : Optional[int] = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
A : int = f"""{module_needed}.py"""
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = use_auth_token
elif use_auth_token is True:
A : Dict = HfFolder.get_token()
else:
A : Tuple = None
A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A : str = submodule_path / commit_hash
A : List[str] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
A : int = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
    return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
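# --- Illustrative usage sketch (comments only; added, not part of the original utilities) ---
# These helpers back DiffusionPipeline's `custom_pipeline=` loading path. The model id and
# community pipeline name below are assumptions for illustration and trigger large downloads.
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="lpw_stable_diffusion"
#   )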
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( ProcessorMixin ):
'''simple docstring'''
a__ = ["image_processor", "tokenizer"]
a__ = "CLIPImageProcessor"
a__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Dict ) -> Any:
A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __snake_case , )
A : Dict = kwargs.pop("feature_extractor" )
A : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__snake_case , __snake_case )
def __call__( self : Tuple , __lowerCamelCase : str=None , __lowerCamelCase : Dict=None , __lowerCamelCase : int=None , **__lowerCamelCase : str ) -> Tuple:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
A : Dict = self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
A : Union[str, Any] = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
A : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def SCREAMING_SNAKE_CASE__ ( self : int , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *__lowerCamelCase : int , **__lowerCamelCase : Any ) -> int:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]:
A : int = self.tokenizer.model_input_names
A : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __snake_case , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __snake_case , )
return self.image_processor | 704 |
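For illustration, a processor of this kind is normally driven through the high-level transformers API roughly as follows (the checkpoint name is only an example, and downloading it requires network access):
from PIL import Image
from transformers import CLIPProcessor
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")  # example checkpoint
image = Image.new("RGB", (224, 224))  # stand-in for a real photo
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# inputs carries input_ids/attention_mask from the tokenizer and pixel_values from the image processor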
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict:
A : str = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
if not do_eval:
return
A : List[Any] = [log for log in logs if "eval_loss" in log.keys()]
A : Any = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A : List[str] = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
        # program, and it breaks other tests that run from the same pytest worker. Therefore, until this is
        # sorted out, it must be run only in an external program, i.e. distributed=True in this
        # test and only under one or more gpus - if we want cpu we will need to make a special test.
        #
        # Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call it botches the future eval.
        #
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A : Dict = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
A : List[str] = experiments[experiment_id]
A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
A : Union[str, Any] = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] )
A : Dict = len(re.findall(__lowerCamelCase , cl.err ) )
self.assertEqual(__lowerCamelCase , data["n_matches"] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : int = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , )
# Check metrics
A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : Dict = [log for log in logs if "eval_loss" in log.keys()]
A : Dict = eval_metrics[0]
A : int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
# test if do_predict saves generations and metrics
A : Optional[Any] = os.listdir(__lowerCamelCase )
A : Any = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]:
A : Optional[int] = "--skip_memory_metrics 0"
A : str = self.run_trainer(
max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
A : int = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig
A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A : int = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
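        # Worked example of that estimate (round numbers, matching the comment above):
        #   Adam keeps two fp32 states per parameter:   25e6 * 8 bytes ≈ 200MB
        #   8-bit Adam keeps two int8 states instead:   25e6 * 2 bytes ≈  50MB
        #   expected saving ≈ 150MB, hence the 120MB margin used below.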
A : Tuple = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]:
A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
A : Optional[int] = self.get_auto_remove_tmp_dir()
A : int = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
A : Optional[Any] = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
            --val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCamelCase )}
""".split()
A : Optional[Any] = "\n --do_predict\n ".split()
A : Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A : Dict = get_gpu_count()
A : Any = get_torch_dist_unique_port()
A : Optional[Any] = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
A : Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
else:
A : List[Any] = ["run_translation.py"] + args
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
main()
return output_dir | 17 | 0 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def UpperCAmelCase ( _lowerCamelCase ):
A : Any = []
for line in lines:
A : int = re.sub(R"#.*" , "" , _lowerCamelCase ) # remove comments
if line:
filtered_lines.append(_lowerCamelCase )
A : Any = "\n".join(_lowerCamelCase )
# Make a hash from all this code
A : Optional[Any] = full_str.encode("utf-8" )
return shaaaa(_lowerCamelCase ).hexdigest()
# get importable module names and hash for caching
__SCREAMING_SNAKE_CASE = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__SCREAMING_SNAKE_CASE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__SCREAMING_SNAKE_CASE = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
__SCREAMING_SNAKE_CASE = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""") | 705 |
from collections.abc import Sequence
def UpperCAmelCase ( _lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
A : Dict = nums[0]
for i in range(1 , len(_lowerCamelCase ) ):
A : Tuple = nums[i]
A : List[Any] = max(_lowerCamelCase , ans + num , _lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__SCREAMING_SNAKE_CASE = int(input("""Enter number of elements : """).strip())
__SCREAMING_SNAKE_CASE = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array)) | 17 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCAmelCase ( ):
A : int = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
A : List[Any] = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(lowercase_ )
DownloadCommand.register_subcommand(lowercase_ )
EnvironmentCommand.register_subcommand(lowercase_ )
RunCommand.register_subcommand(lowercase_ )
ServeCommand.register_subcommand(lowercase_ )
UserCommands.register_subcommand(lowercase_ )
AddNewModelCommand.register_subcommand(lowercase_ )
AddNewModelLikeCommand.register_subcommand(lowercase_ )
LfsCommands.register_subcommand(lowercase_ )
PTtoTFCommand.register_subcommand(lowercase_ )
# Let's go
A : Optional[Any] = parser.parse_args()
if not hasattr(lowercase_ , "func" ):
parser.print_help()
exit(1 )
# Run
A : str = args.func(lowercase_ )
service.run()
if __name__ == "__main__":
main() | 706 |
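Once installed, the entry point above is exposed as a console script. A hedged illustration of invoking it from Python (the "env" subcommand is registered by EnvironmentCommand above and prints environment info):
import subprocess
# Requires transformers to be installed so the "transformers-cli" console script exists.
subprocess.run(["transformers-cli", "env"], check=True)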
from math import sqrt
def UpperCAmelCase ( _lowerCamelCase = 100_0000 ):
A : int = 0
A : int = 0
A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCamelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""") | 17 | 0 |
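A quick sanity check of the unfolding formula used in the loop above, on the well-known 6x5x3 cuboid whose shortest surface path between opposite corners is exactly 10:
from math import sqrt
# the two shorter sides sum to 8, so the unfolded straight-line distance is sqrt(6**2 + 8**2) = 10
assert sqrt(6**2 + (5 + 3) ** 2) == 10.0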
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase_ ( UpperCAmelCase_ ,unittest.TestCase ):
'''simple docstring'''
a__ = CTRLTokenizer
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A : Tuple = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
A : Any = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A : List[str] = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
A : Any = {"unk_token": "<unk>"}
A : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
A : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Tuple ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Dict ) -> str:
A : List[str] = "adapt react readapt apt"
A : Tuple = "adapt react readapt apt"
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
A : Optional[int] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A : Any = "adapt react readapt apt"
A : Any = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
A : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
A : Any = tokens + [tokenizer.unk_token]
A : Optional[Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) | 707 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__SCREAMING_SNAKE_CASE = """."""
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
with open(doctest_file_path) as fp:
for line in fp:
__SCREAMING_SNAKE_CASE = line.strip()
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""") | 17 | 0 |
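If the ordering check above fails, the expected order can be printed with a small follow-up along these lines (a sketch that assumes the same `all_paths` list built by the script):
# Print the paths in the alphabetical order the check expects, so the entries
# in utils/documentation_tests.txt can be rearranged by hand.
for path in sorted(all_paths):
    print(path)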
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
__SCREAMING_SNAKE_CASE = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
__SCREAMING_SNAKE_CASE = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
__SCREAMING_SNAKE_CASE = reader.read()
__SCREAMING_SNAKE_CASE = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
__SCREAMING_SNAKE_CASE = UNetaDModel(**config)
else:
__SCREAMING_SNAKE_CASE = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
__SCREAMING_SNAKE_CASE = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__SCREAMING_SNAKE_CASE = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__SCREAMING_SNAKE_CASE = config[key]
del config[key]
__SCREAMING_SNAKE_CASE = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
__SCREAMING_SNAKE_CASE = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
__SCREAMING_SNAKE_CASE = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
__SCREAMING_SNAKE_CASE = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
__SCREAMING_SNAKE_CASE = param_value
__SCREAMING_SNAKE_CASE = True
if not has_changed:
__SCREAMING_SNAKE_CASE = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder)) | 708 |
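The weight-renaming loop above maps old key prefixes to new ones. A minimal generic version of that idea (a simplified sketch, not the script's exact logic):
def rename_state_dict_keys(state_dict, prefix_map):
    # Return a copy of state_dict with the first dotted component of each key remapped via prefix_map.
    renamed = {}
    for key, value in state_dict.items():
        head, _, tail = key.partition(".")
        new_head = prefix_map.get(head, head)
        renamed[f"{new_head}.{tail}" if tail else new_head] = value
    return renamed
# e.g. rename_state_dict_keys({"mid.block.weight": 0}, {"mid": "mid_block"})
#      -> {"mid_block.block.weight": 0}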
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str:
A : List[Any] = parent
A : Optional[int] = batch_size
A : Any = image_size
A : Optional[Any] = patch_size
A : Optional[Any] = num_channels
A : Tuple = is_training
A : Optional[Any] = use_labels
A : Union[str, Any] = hidden_size
A : Tuple = num_hidden_layers
A : Union[str, Any] = num_attention_heads
A : Union[str, Any] = intermediate_size
A : Any = hidden_act
A : Tuple = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Tuple = initializer_range
A : List[Any] = scope
A : Optional[int] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A : List[str] = (image_size // patch_size) ** 2
A : List[str] = num_patches + 2
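        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 227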
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[Any] = None
if self.use_labels:
A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int:
A : Optional[int] = DeiTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any:
A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : List[str] = 1
A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict:
A : str = self.type_sequence_label_size
A : List[str] = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Any = 1
A : str = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Dict = self.prepare_config_and_inputs()
        A , A , A : Tuple = config_and_inputs
A : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
A : str = DeiTModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Any = [*signature.parameters.keys()]
A : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str:
A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
if not self.model_tester.is_training:
return
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A : Union[str, Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Dict = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A : Tuple = False
A : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A : List[str] = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
A : Tuple = problem_type["title"]
A : Optional[Any] = problem_type["num_labels"]
A : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
A : int = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
A : Optional[Any] = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : List[str] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : str = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
A : Dict = self.default_image_processor
A : Optional[int] = prepare_img()
A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" )
A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A : List[str] = model(__lowerCamelCase ) | 17 | 0 |
import numpy as np
def UpperCAmelCase ( _lowerCamelCase ):
return 1 / (1 + np.exp(-vector ))
def UpperCAmelCase ( _lowerCamelCase ):
return vector * sigmoid(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 709 |
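A couple of spot checks for the two activations above, recomputed inline so they stand on their own (values rounded):
import numpy as np
# sigmoid(0) = 0.5, and the sigmoid-weighted value (swish / SiLU) at 0 is therefore 0
assert 1 / (1 + np.exp(-0.0)) == 0.5
# swish(1) = 1 * sigmoid(1) ≈ 0.7311
assert abs(1 / (1 + np.exp(-1.0)) - 0.7311) < 1e-4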
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__SCREAMING_SNAKE_CASE = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__SCREAMING_SNAKE_CASE = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]:
A : str = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
return {"recall": float(__lowerCamelCase ) if score.size == 1 else score} | 17 | 0 |
def UpperCAmelCase ( _lowerCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
A : Dict = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__UpperCamelCase )
if number < 0:
return False
A : List[str] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 710 |
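The digit-by-digit comparison above tests whether a number is automorphic, i.e. whether its square ends in the number itself. A few known cases for reference:
# automorphic: 5 -> 25, 6 -> 36, 25 -> 625, 76 -> 5776; not automorphic: 7 -> 49
for n in (5, 6, 25, 76):
    assert str(n * n).endswith(str(n))
assert not str(7 * 7).endswith("7")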
from collections import deque
from .hash_table import HashTable
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
A : Dict = self.values[key]
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase ) | 17 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
a__ = MobileBertTokenizer
a__ = MobileBertTokenizerFast
a__ = True
a__ = True
a__ = filter_non_english
a__ = "google/mobilebert-uncased"
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
super().setUp()
A : Optional[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
A : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
A : int = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> int:
A : Optional[int] = "UNwant\u00E9d,running"
A : List[str] = "unwanted, running"
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
A : int = self.tokenizer_class(self.vocab_file )
A : List[Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
A : str = self.get_tokenizer()
A : Tuple = self.get_rust_tokenizer()
A : Optional[int] = "UNwant\u00E9d,running"
A : List[str] = tokenizer.tokenize(_lowerCamelCase )
A : Tuple = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A : Any = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A : Dict = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A : str = self.get_rust_tokenizer()
A : Optional[int] = tokenizer.encode(_lowerCamelCase )
A : Union[str, Any] = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# With lower casing
A : Any = self.get_tokenizer(do_lower_case=_lowerCamelCase )
A : List[Any] = self.get_rust_tokenizer(do_lower_case=_lowerCamelCase )
A : int = "UNwant\u00E9d,running"
A : Optional[int] = tokenizer.tokenize(_lowerCamelCase )
A : int = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A : Dict = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A : Dict = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A : Dict = self.get_rust_tokenizer()
A : Union[str, Any] = tokenizer.encode(_lowerCamelCase )
A : Union[str, Any] = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple:
A : Tuple = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
A : Optional[Any] = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
A : int = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
A : List[str] = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
A : Tuple = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : Optional[int] = BasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : Optional[int] = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
A : List[str] = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : Optional[Any] = BasicTokenizer(do_lower_case=_lowerCamelCase , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
A : Dict = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
A : Any = {}
for i, token in enumerate(_lowerCamelCase ):
A : Optional[int] = i
A : Optional[Any] = WordpieceTokenizer(vocab=_lowerCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
A : Optional[Any] = self.get_tokenizer()
A : str = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A : int = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
A : str = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCamelCase )
A : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCamelCase )
A : List[str] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
A : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A : List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A : int = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
A : Any = tokenizer_r.encode_plus(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase , )
A : List[Any] = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase , "do_lower_case" ) else False
A : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[Any]:
A : List[Any] = ["的", "人", "有"]
A : Dict = "".join(_lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A : Dict = True
A : List[str] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A : Tuple = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A : Any = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A : Dict = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A : Tuple = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A : Tuple = False
A : List[str] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A : str = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A : Tuple = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A : Union[str, Any] = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A : int = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A : Any = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A : List[Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_lowerCamelCase )
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) | 711 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict:
A : Optional[Any] = 4
A : List[str] = 32
A : Any = (32, 32)
A : str = torch.manual_seed(0 )
A : int = torch.device(__lowerCamelCase )
A : List[str] = (batch_size, num_channels) + sizes
A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase )
A : int = {"hidden_states": hidden_states}
if include_temb:
A : Any = 1_28
A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase )
if include_res_hidden_states_tuple:
A : str = torch.manual_seed(1 )
A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),)
if include_encoder_hidden_states:
A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase )
if include_skip_sample:
A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : Dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 1_28,
}
if self.block_type == "up":
A : Dict = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A : str = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A , A : str = self.prepare_init_args_and_inputs_for_common()
A : List[Any] = self.block_class(**__lowerCamelCase )
unet_block.to(__lowerCamelCase )
unet_block.eval()
with torch.no_grad():
A : int = unet_block(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Union[str, Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
A : Any = output[0, -1, -3:, -3:]
A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase )
assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.prepare_init_args_and_inputs_for_common()
A : str = self.block_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Optional[int] = model(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Optional[Any] = output[0]
A : List[str] = torch.device(__lowerCamelCase )
A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase )
A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase )
loss.backward() | 17 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__SCREAMING_SNAKE_CASE = False
try:
__SCREAMING_SNAKE_CASE = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str] = None , __lowerCamelCase : str = [] ) -> Tuple:
A : Any = 0
A : Optional[Any] = choices
A : Union[str, Any] = prompt
if sys.platform == "win32":
A : Optional[int] = "*"
else:
A : Optional[int] = "➔ "
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple = "" ) -> Optional[Any]:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __lowerCamelCase )
else:
forceWrite(self.choices[index] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Optional[Any] ) -> int:
if index == self.position:
forceWrite(F""" {self.arrow_char} """ )
self.write_choice(__lowerCamelCase )
else:
forceWrite(F""" {self.choices[index]}""" )
reset_cursor()
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] = 1 ) -> Tuple:
A : Tuple = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
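        # redraw: clear the current line, re-print the previously highlighted row (now without the
        # arrow), then move the cursor and re-print the newly selected row with the arrow in front.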
clear_line()
self.print_choice(__lowerCamelCase )
move_cursor(__lowerCamelCase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__lowerCamelCase )] for number in range(10 )] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
A : List[Any] = int(chr(self.current_selection ) )
A : str = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __lowerCamelCase )
else:
return
else:
return
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Any = 0 ) -> int:
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
A : str = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__lowerCamelCase )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
A : Dict = int(builtins.input() )
except ValueError:
A : Dict = default_choice
else:
A : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__lowerCamelCase , "\n" )
return choice
| 712 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]:
A : str = max_length
A : Optional[int] = max_position_embeddings
@add_start_docstrings(__lowerCamelCase )
def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool:
A : List[Any] = input_ids.shape[-1]
A : Any = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]:
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , __lowerCamelCase , )
A : str = start_length
A : Optional[Any] = max_new_tokens
A : Dict = start_length + max_new_tokens
@add_start_docstrings(__lowerCamelCase )
def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return input_ids.shape[-1] >= self.max_length
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]:
A : str = max_time
A : Dict = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool:
return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
return None
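# These criteria mirror transformers' MaxLengthCriteria / MaxTimeCriteria / StoppingCriteriaList.
# A minimal hand-rolled sketch (hypothetical values; `generate` normally builds the list itself):
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
# should_stop = criteria(input_ids, scores)  # True as soon as any single criterion fires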
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[int] = stopping_criteria.max_length
A : Any = deepcopy(_lowerCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) )
return new_stopping_criteria | 17 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCamelCase_ ( __lowercase ):
'''simple docstring'''
a__ = ['pixel_values']
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str] = True , __lowerCamelCase : Optional[int] = 1 / 2_55 , __lowerCamelCase : Dict = True , __lowerCamelCase : Any = 8 , **__lowerCamelCase : Dict , ) -> None:
super().__init__(**__A )
A : Dict = do_rescale
A : List[Any] = rescale_factor
A : int = do_pad
A : List[Any] = pad_size
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : str = None , **__lowerCamelCase : Optional[int] ) -> np.ndarray:
return rescale(__A , scale=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple = None ) -> int:
A , A : Tuple = get_image_size(__A )
A : Any = (old_height // size + 1) * size - old_height
A : Any = (old_width // size + 1) * size - old_width
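        # e.g. with size=8, an edge of 17 pixels receives (17 // 8 + 1) * 8 - 17 = 7 pixels of padding
        # (padded to 24); an edge that is already a multiple of the size still gains one full block.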
return pad(__A , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__A )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Any = None , __lowerCamelCase : List[Any] = None , __lowerCamelCase : Tuple = None , __lowerCamelCase : Optional[int] = ChannelDimension.FIRST , **__lowerCamelCase : Union[str, Any] , ) -> Dict:
A : str = do_rescale if do_rescale is not None else self.do_rescale
A : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Tuple = do_pad if do_pad is not None else self.do_pad
A : int = pad_size if pad_size is not None else self.pad_size
A : int = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(__A ) for image in images]
if do_rescale:
A : Optional[int] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_pad:
A : Union[str, Any] = [self.pad(__A , size=__A ) for image in images]
A : Tuple = [to_channel_dimension_format(__A , __A ) for image in images]
A : Tuple = {"pixel_values": images}
return BatchFeature(data=__A , tensor_type=__A ) | 713 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ):
A : str = symbols(_lowerCamelCase )
A : int = lambdify(_lowerCamelCase , _lowerCamelCase )
A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) )
A : Optional[int] = starting_point
while True:
if diff_function(_lowerCamelCase ) != 0:
A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function(
_lowerCamelCase )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
A : int = next_guess
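# A quick sanity check of the iteration above (hypothetical call, using the default precision):
# newton_raphson("x**2 - 2", 1) converges to ~1.41421356, the square root of 2, since
# x**2 - 2 has a simple root there and its derivative 2*x stays away from zero near it.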
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
    # Find the fourth root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""") | 17 | 0 |
import qiskit
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
A : Optional[int] = qiskit.QuantumCircuit(UpperCamelCase__ , UpperCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
A : List[str] = qiskit.execute(UpperCamelCase__ , UpperCamelCase__ , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(UpperCamelCase__ )
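# Note: no gates are applied before the measurement, so the qubit stays in |0> and the returned
# histogram is expected to be {'0': 1000} for the 1000 shots requested above.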
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""") | 714 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__SCREAMING_SNAKE_CASE = {
"""allenai/led-base-16384""": 16384,
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = LEDTokenizer
a__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]:
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
A : Any = add_prefix_space
A : Tuple = pre_tok_class(**__lowerCamelCase )
A : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A : List[str] = "post_processor"
A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
A : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A : Union[str, Any] = tuple(state["sep"] )
if "cls" in state:
A : str = tuple(state["cls"] )
A : int = False
if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : List[Any] = add_prefix_space
A : Dict = True
if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets:
A : Dict = trim_offsets
A : str = True
if changes_to_apply:
A : int = getattr(__lowerCamelCase , state.pop("type" ) )
A : Dict = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict:
A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value
A : Tuple = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]:
A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
A : str = [self.sep_token_id]
A : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict:
A : Dict = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
A : List[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A : Optional[int] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
A : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs | 17 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because it should only be run when releasing a minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : str ) -> List[str]:
A : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
A : Tuple = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__SCREAMING_SNAKE_CASE , instance_count=__SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=__SCREAMING_SNAKE_CASE , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__SCREAMING_SNAKE_CASE , py_version="py36" , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
TrainingJobAnalytics(__SCREAMING_SNAKE_CASE ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Any ) -> Any:
A : Dict = self.create_estimator(__SCREAMING_SNAKE_CASE )
# run training
estimator.fit()
# result dataframe
A : str = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
A : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
A : str = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
A : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump test results into a json file to share in the PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __SCREAMING_SNAKE_CASE ) | 715 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_A )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} )
a__ = Features({"question": Value("string" ), "context": Value("string" )} )
a__ = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
a__ = "question"
a__ = "context"
a__ = "answers"
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"} | 17 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
a__ = ["image_processor", "tokenizer"]
a__ = "AutoImageProcessor"
a__ = "AutoTokenizer"
def __init__( self : List[str] , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : Union[str, Any] ) -> Any:
A : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
A : Any = kwargs.pop("feature_extractor" )
A : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
A : List[str] = self.image_processor
A : int = False
def __call__( self : Optional[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[int] ) -> List[Any]:
if self._in_target_context_manager:
return self.current_processor(*__a , **__a )
A : Tuple = kwargs.pop("images" , __a )
A : str = kwargs.pop("text" , __a )
if len(__a ) > 0:
A : Union[str, Any] = args[0]
A : Optional[Any] = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
A : int = self.image_processor(__a , *__a , **__a )
if text is not None:
A : Tuple = self.tokenizer(__a , **__a )
if text is None:
return inputs
elif images is None:
return encodings
else:
            A : List[Any] = encodings["input_ids"]
return inputs
def SCREAMING_SNAKE_CASE__ ( self : int , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[Any] ) -> Tuple:
return self.tokenizer.batch_decode(*__a , **__a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[int] ) -> int:
return self.tokenizer.decode(*__a , **__a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
A : List[str] = True
A : Optional[Any] = self.tokenizer
yield
A : Dict = self.image_processor
A : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[Any]=None ) -> Optional[Any]:
if added_vocab is None:
A : int = self.tokenizer.get_added_vocab()
A : Any = {}
while tokens:
A : Any = re.search(r"<s_(.*?)>" , __a , re.IGNORECASE )
if start_token is None:
break
A : int = start_token.group(1 )
A : Tuple = re.search(rF"""</s_{key}>""" , __a , re.IGNORECASE )
A : Any = start_token.group()
if end_token is None:
A : int = tokens.replace(__a , "" )
else:
A : str = end_token.group()
A : Optional[int] = re.escape(__a )
A : Tuple = re.escape(__a )
A : Optional[int] = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , __a , re.IGNORECASE )
if content is not None:
A : Union[str, Any] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
A : Union[str, Any] = self.tokenajson(__a , is_inner_value=__a , added_vocab=__a )
if value:
if len(__a ) == 1:
A : Optional[Any] = value[0]
A : Union[str, Any] = value
else: # leaf nodes
A : Any = []
for leaf in content.split(r"<sep/>" ):
A : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
A : Optional[Any] = leaf[1:-2] # for categorical special tokens
output[key].append(__a )
if len(output[key] ) == 1:
A : Optional[Any] = output[key][0]
A : str = tokens[tokens.find(__a ) + len(__a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__a , added_vocab=__a )
if len(__a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
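    # For instance, an (illustrative) sequence "<s_menu><s_name>latte</s_name></s_menu>" is parsed
    # by the method above into the nested dict {"menu": {"name": "latte"}}; leaf values joined by
    # <sep/> become lists, and single-element lists collapse back to a plain value.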
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor | 716 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int:
A : Optional[int] = parent
A : List[str] = batch_size
A : Tuple = image_size
A : List[str] = num_channels
A : List[str] = embeddings_size
A : List[str] = hidden_sizes
A : str = depths
A : Optional[Any] = is_training
A : int = use_labels
A : Optional[int] = hidden_act
A : List[Any] = num_labels
A : List[str] = scope
A : str = len(__lowerCamelCase )
A : Optional[int] = out_features
A : str = out_indices
A : Optional[int] = num_groups
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[int] = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
A : Any = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple:
A : Union[str, Any] = self.num_labels
A : List[str] = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : str = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> List[Any]:
A : Dict = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[Any] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A : Optional[Any] = None
A : Optional[int] = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
A : List[str] = self.prepare_config_and_inputs()
A , A , A : Tuple = config_and_inputs
A : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : Any = BitModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason="Bit does not output attentions" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
A : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
A : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : Union[str, Any] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : Union[str, Any] = BitModelTester(self ) | 17 | 0 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
assert isinstance(_lowercase , _lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Union[str, Any] = tmp_path / "cache"
A : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Any = JsonDatasetReader(_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase ).read()
_check_json_dataset(_lowercase , _lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = tmp_path / "cache"
A : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A : List[str] = features.copy() if features else default_expected_features
A : int = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] = JsonDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read()
_check_json_dataset(_lowercase , _lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : str = tmp_path / "cache"
A : List[Any] = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : Tuple = features.copy() if features else default_expected_features
A : Union[str, Any] = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] = JsonDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read()
assert isinstance(_lowercase , _lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : str = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
A : List[str] = features.copy()
A : int = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Union[str, Any] = tmp_path / "cache"
A : Any = JsonDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read()
assert isinstance(_lowercase , _lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : List[str] = tmp_path / "cache"
A : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A : Any = JsonDatasetReader(_lowercase , cache_dir=_lowercase , split=_lowercase ).read()
_check_json_dataset(_lowercase , _lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if issubclass(_lowercase , _lowercase ):
A : Optional[Any] = jsonl_path
elif issubclass(_lowercase , _lowercase ):
A : List[str] = [jsonl_path]
A : int = tmp_path / "cache"
A : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A : Union[str, Any] = JsonDatasetReader(_lowercase , cache_dir=_lowercase ).read()
_check_json_dataset(_lowercase , _lowercase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ):
assert isinstance(_lowercase , _lowercase )
for split in splits:
A : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Union[str, Any] = tmp_path / "cache"
A : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Union[str, Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowercase , keep_in_memory=_lowercase ).read()
_check_json_datasetdict(_lowercase , _lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Tuple = tmp_path / "cache"
A : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A : Any = features.copy() if features else default_expected_features
A : Any = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : List[Any] = JsonDatasetReader({"train": jsonl_path} , features=_lowercase , cache_dir=_lowercase ).read()
_check_json_datasetdict(_lowercase , _lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if split:
A : Union[str, Any] = {split: jsonl_path}
else:
A : str = "train"
A : List[Any] = {"train": jsonl_path, "test": jsonl_path}
A : Union[str, Any] = tmp_path / "cache"
A : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A : Optional[Any] = JsonDatasetReader(_lowercase , cache_dir=_lowercase ).read()
_check_json_datasetdict(_lowercase , _lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase ( _lowerCamelCase ):
return json.load(_lowercase )
def UpperCAmelCase ( _lowerCamelCase ):
return [json.loads(_lowercase ) for line in buffer]
class lowerCamelCase_ :
'''simple docstring'''
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__A , __A , lines=__A ).write()
buffer.seek(0 )
A : List[str] = load_json_function(__A )
assert isinstance(__A , __A )
assert isinstance(exported_content[0] , __A )
assert len(__A ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__A , __A , lines=__A , orient=__A ).write()
buffer.seek(0 )
A : Tuple = load_json(__A )
assert isinstance(__A , __A )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__A , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__A ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Tuple:
with io.BytesIO() as buffer:
JsonDatasetWriter(__A , __A , lines=__A , num_proc=2 ).write()
buffer.seek(0 )
A : Any = load_json_function(__A )
assert isinstance(__A , __A )
assert isinstance(exported_content[0] , __A )
assert len(__A ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__A , __A , lines=__A , orient=__A , num_proc=2 ).write()
buffer.seek(0 )
A : Optional[int] = load_json(__A )
assert isinstance(__A , __A )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__A , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__A ) == 10
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Optional[int] ) -> Optional[int]:
with pytest.raises(__A ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__A , __A , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> Optional[Any]:
A : int = tmp_path_factory.mktemp("data" ) / F"""test.json.{extension}"""
A : Tuple = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__A , __A , compression=__A ).write()
with fsspec.open(__A , "rb" , compression="infer" ) as f:
A : Dict = f.read()
with fsspec.open(__A , "rb" , compression="infer" ) as f:
A : List[Any] = f.read()
assert exported_content == original_content | 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 7_68) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
A : List[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) ) | 17 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
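# Mapping from key prefixes in the original SAM checkpoints to the module names
# used by the Transformers SamModel implementation.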
__SCREAMING_SNAKE_CASE = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
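# Rename checkpoint keys with the mapping above and remap the output-hypernetwork MLP
# layer indices to proj_in / layers.0 / proj_out.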
def replace_keys ( state_dict ):
    model_state_dict = {}
    state_dict.pop("pixel_mean" , None )
    state_dict.pop("pixel_std" , None )
    output_hypernetworks_mlps_pattern = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace("layers.0" , "proj_in" )
            elif layer_nb == 1:
                key = key.replace("layers.1" , "layers.0" )
            elif layer_nb == 2:
                key = key.replace("layers.2" , "proj_out" )
        model_state_dict[key] = value
    # mirror the prompt-encoder positional embedding under the shared-embedding key used by SamModel
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
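# Build the matching SamConfig, load and remap the original weights, then smoke-test the
# converted model on a demo car image with point and box prompts.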
def convert_sam_checkpoint ( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
    checkpoint_path = hf_hub_download(model_hub_id , f"""checkpoints/{model_name}.pth""" )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location="cpu" )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to("cuda" )
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ) , return_tensors="pt" ).to("cuda" )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image ) , input_boxes=input_boxes , return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id) | 718 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
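# Helper that builds a small RegNet configuration and random pixel inputs for the TF model tests below.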
class lowerCamelCase_ :
    '''Builds small RegNet configs and random inputs for the TF model tests.'''
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str:
A : Optional[Any] = parent
A : Optional[int] = batch_size
A : List[str] = image_size
A : List[str] = num_channels
A : Tuple = embeddings_size
A : Optional[int] = hidden_sizes
A : Dict = depths
A : Optional[int] = is_training
A : List[str] = use_labels
A : List[Any] = hidden_act
A : Optional[int] = num_labels
A : int = scope
A : List[Any] = len(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : Any = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
A : List[str] = TFRegNetModel(config=__lowerCamelCase )
A : str = model(__lowerCamelCase , training=__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]:
A : List[Any] = self.num_labels
A : int = TFRegNetForImageClassification(__lowerCamelCase )
A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
A : Any = self.prepare_config_and_inputs()
A , A , A : str = config_and_inputs
A : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a__ = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Optional[Any] = TFRegNetModelTester(self )
A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
A : int = model_class(__lowerCamelCase )
A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase )
A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : List[str] = layer_type
A : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ):
A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase )
A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple()
def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ):
if isinstance(__lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ):
recursive_check(__lowerCamelCase , __lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase )
for model_class in self.all_model_classes:
A : Tuple = model_class(__lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img ( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A : Optional[int] = self.default_image_processor
A : List[Any] = prepare_img()
A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# forward pass
A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase )
# verify the logits
A : Dict = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) | 17 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__SCREAMING_SNAKE_CASE = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
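# Collect file names that violate the repository naming conventions: uppercase letters,
# spaces, hyphens, or files that sit outside any directory.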
__SCREAMING_SNAKE_CASE = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print("""\n""".join(upper_files) + """\n""")
__SCREAMING_SNAKE_CASE = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print("""\n""".join(space_files) + """\n""")
__SCREAMING_SNAKE_CASE = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print("""\n""".join(hyphen_files) + """\n""")
__SCREAMING_SNAKE_CASE = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print("""\n""".join(nodir_files) + """\n""")
__SCREAMING_SNAKE_CASE = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files) | 719 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = (PNDMScheduler,)
a__ = (("num_inference_steps", 50),)
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]:
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**__lowerCamelCase )
        return config
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple:
A : Dict = dict(self.forward_default_kwargs )
A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : Union[str, Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Any = self.get_scheduler_config(**__lowerCamelCase )
A : int = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : Dict = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str:
A : List[str] = dict(self.forward_default_kwargs )
A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : List[str] = self.dummy_sample
A : Any = 0.1 * sample
A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Tuple = self.get_scheduler_config()
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A : Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : str = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[:]
A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
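    # Run a full PRK + PLMS denoising loop with the dummy model and return the final sample.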
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]:
A : Optional[Any] = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(**__lowerCamelCase )
A : str = scheduler_class(**__lowerCamelCase )
A : List[str] = 10
A : Union[str, Any] = self.dummy_model()
A : int = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A : Tuple = model(__lowerCamelCase , __lowerCamelCase )
A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
A : Union[str, Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
A : List[Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__lowerCamelCase )
elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ):
A : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
A : str = 27
for scheduler_class in self.scheduler_classes:
A : Tuple = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
with self.assertRaises(__lowerCamelCase ):
A : Union[str, Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : Optional[Any] = self.full_loop()
A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
A : Any = self.full_loop(prediction_type="v_prediction" )
A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
A : Dict = torch.sum(torch.abs(__lowerCamelCase ) )
A : Any = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3 | 17 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__SCREAMING_SNAKE_CASE = """scheduler_config.json"""
class lowerCamelCase_ ( Enum ):
'''simple docstring'''
a__ = 1
a__ = 2
a__ = 3
a__ = 4
a__ = 5
@dataclass
class lowerCamelCase_ ( BaseOutput ):
'''simple docstring'''
a__ = 42
class lowerCamelCase_ :
'''simple docstring'''
a__ = SCHEDULER_CONFIG_NAME
a__ = ['dtype']
a__ = []
a__ = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = None , __lowerCamelCase : Optional[int]=False , **__lowerCamelCase : List[Any] , ) -> Tuple:
A , A : Optional[int] = cls.load_config(
pretrained_model_name_or_path=_a , subfolder=_a , return_unused_kwargs=_a , **_a , )
A , A : List[str] = cls.from_config(_a , return_unused_kwargs=_a , **_a )
if hasattr(_a , "create_state" ) and getattr(_a , "has_state" , _a ):
A : Dict = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] = False , **__lowerCamelCase : Dict ) -> Any:
self.save_config(save_directory=_a , push_to_hub=_a , **_a )
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
return self._get_compatibles()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple ) -> Tuple:
A : Optional[Any] = list(set([cls.__name__] + cls._compatibles ) )
A : int = importlib.import_module(__name__.split("." )[0] )
A : Union[str, Any] = [
getattr(_a , _a ) for c in compatible_classes_str if hasattr(_a , _a )
]
return compatible_classes
def broadcast_to_shape_from_left ( x , shape ):
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
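# Cosine ("squaredcos_cap_v2") beta schedule: discretize alpha_bar(t) = cos^2((t + 0.008) / 1.008 * pi / 2)
# into per-step betas, each clipped at max_beta.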
def betas_for_alpha_bar ( num_diffusion_timesteps , max_beta=0.9_99 , dtype=jnp.float32 ):
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class lowerCamelCase_ :
'''simple docstring'''
a__ = 42
a__ = 42
a__ = 42
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , __lowerCamelCase : Optional[int] ) -> str:
A : Union[str, Any] = scheduler.config
if config.trained_betas is not None:
A : Dict = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
A : Dict = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A : Dict = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
A : str = 1.0 - betas
A : int = jnp.cumprod(_a , axis=0 )
return cls(
alphas=_a , betas=_a , alphas_cumprod=_a , )
def get_sqrt_alpha_prod ( state , original_samples , noise , timesteps ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCAmelCase ( state , original_samples , noise , timesteps ):
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def UpperCAmelCase ( state , sample , noise , timesteps ):
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
__SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s
__SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1
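# Ideal Casimir force between two parallel plates: F = (pi^2 * hbar * c * A) / (240 * d^4).
# Exactly one of force, area and distance must be passed as 0; the function solves for that unknown.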
def UpperCAmelCase ( force , area , distance ):
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ["""ChineseCLIPFeatureExtractor"""]
__SCREAMING_SNAKE_CASE = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 721 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained(
_lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) )
A : Union[str, Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A : Any = tensor_value
A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
# convert tokenizer
A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 17 | 0 |
def sum_digits ( num ):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
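# Build the numerator of the max_n-th convergent of the continued fraction of e
# ([2; 1, 2, 1, 1, 4, 1, 1, 6, ...]) and return the digit sum of that numerator.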
def solution ( max_n = 100 ):
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"""{solution() = }""") | 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 17 | 0 |
from __future__ import annotations
import math
def ucal ( u , p ):
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
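# Read the sample points interactively, fill the forward-difference table and evaluate
# Newton's forward interpolation formula at the requested value.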
def main ( ):
    n = int(input("enter the numbers of values: " ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print("enter the values of parameters in a list: " )
    x = list(map(int , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("enter the value to interpolate: " ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main() | 701 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
    '''Tests for GradientAccumulator and the create_optimizer utility.'''
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for a, b in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Union[str, Any] = None
ops.enable_eager_execution_internal()
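        # Split the single physical CPU into two logical devices so MirroredStrategy can run a two-replica accumulation test.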
A : Tuple = tf.config.list_physical_devices("CPU" )
if len(__lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
A : Dict = tf.config.list_logical_devices(device_type="CPU" )
A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
A : Optional[int] = GradientAccumulator()
A : Tuple = tf.Variable([4.0, 3.0] )
A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 )
A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase )
def accumulate_on_replica(__lowerCamelCase : Tuple ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
with strategy.scope():
A : int = strategy.experimental_local_results(__lowerCamelCase )
local_variables[0].assign(__lowerCamelCase )
local_variables[1].assign(__lowerCamelCase )
strategy.run(__lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowerCamelCase )
def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] ) | 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 702 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 17 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__SCREAMING_SNAKE_CASE = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
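# Prompt helpers for building an accelerate config interactively: ask a question, optionally
# convert the raw answer, and fall back to a default on empty input.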
def UpperCAmelCase ( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def UpperCAmelCase ( input_text , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def UpperCAmelCase ( _lowerCamelCase ):
A : Dict = int(__lowercase )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def UpperCAmelCase ( _lowerCamelCase ):
A : int = int(__lowercase )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def UpperCAmelCase ( _lowerCamelCase ):
A : Any = int(__lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCAmelCase ( _lowerCamelCase ):
A : Tuple = int(__lowercase )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def UpperCAmelCase ( _lowerCamelCase ):
A : Union[str, Any] = int(__lowercase )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def UpperCAmelCase ( _lowerCamelCase ):
return {"yes": True, "no": False}[value.lower()]
class lowerCamelCase_ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ) -> List[Any]:
A : Optional[Any] = super()._format_usage(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Union[str, Any] = usage.replace("<command> [<args>] " , "" )
return usage | 703 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__SCREAMING_SNAKE_CASE = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
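# Query PyPI for every published diffusers release, sorted from oldest to newest.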
def UpperCAmelCase ( ):
A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json"
A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys()
return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) )
def UpperCAmelCase ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
A : List[Any] = Path(_lowerCamelCase ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( _lowerCamelCase ):
init_hf_modules()
A : Tuple = Path(_lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
A : Optional[int] = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Union[str, Any] = f.read()
# Imports of the form `import .xxx`
A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(_lowerCamelCase ) )
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[int] = False
A : Tuple = [module_file]
A : Optional[int] = []
# Let's recurse through all relative imports
while not no_change:
A : Optional[Any] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowerCamelCase ) )
A : Optional[Any] = Path(_lowerCamelCase ).parent
A : List[str] = [str(module_path / m ) for m in new_imports]
A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports]
A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files]
A : Tuple = len(_lowerCamelCase ) == 0
all_relative_imports.extend(_lowerCamelCase )
return all_relative_imports
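# Collect the top-level third-party imports of a module file and raise early if any of them
# are missing from the environment.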
def UpperCAmelCase ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Dict = f.read()
# Imports of the form `import xxx`
A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
A : Any = list(set(_lowerCamelCase ) )
A : Tuple = []
for imp in imports:
try:
importlib.import_module(_lowerCamelCase )
except ImportError:
missing_packages.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" )
return get_relative_imports(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : int = module_path.replace(os.path.sep , "." )
A : Optional[Any] = importlib.import_module(_lowerCamelCase )
if class_name is None:
return find_pipeline_class(_lowerCamelCase )
return getattr(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase ):
from ..pipelines import DiffusionPipeline
A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) )
A : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowerCamelCase )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
A : Any = cls
return pipeline_class
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ):
A : List[Any] = str(_lowerCamelCase )
A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
A : Union[str, Any] = module_file_or_url
A : Any = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
A : Optional[Any] = get_diffusers_versions()
# cut ".dev0"
A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
A : List[Any] = latest_version if latest_version[1:] in available_versions else "main"
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
A : Optional[Any] = f"""v{revision}"""
elif revision == "main":
A : Dict = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
A : Optional[int] = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = "git"
A : Any = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
A : Any = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
A : List[str] = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
A : Optional[int] = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
A : int = f"""{module_needed}.py"""
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = use_auth_token
elif use_auth_token is True:
A : Dict = HfFolder.get_token()
else:
A : Tuple = None
A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A : str = submodule_path / commit_hash
A : List[str] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
A : int = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
    return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) )
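# Hypothetical usage sketch (not part of the original file): in the upstream diffusers
# library the two helpers above are exposed as `get_cached_module_file` and
# `get_class_from_dynamic_module`, and they back the `custom_pipeline` argument of
# `DiffusionPipeline.from_pretrained`.  Assuming the community pipeline file
# "clip_guided_stable_diffusion.py" exists under examples/community, resolving its
# pipeline class could look roughly like this:
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion", "clip_guided_stable_diffusion.py"
#   )
#
# The returned class can then be instantiated through its own `from_pretrained` call.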
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1024 ):
A , A : Optional[int] = [], []
A : Dict = list(zip(__snake_case , __snake_case ) )
A , A : Any = sorted_examples[0]
def is_too_big(_lowerCamelCase ):
return tok(__snake_case , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
A : Dict = new_src + " " + src
A : int = new_tgt + " " + tgt
if is_too_big(__snake_case ) or is_too_big(__snake_case ): # cant fit, finalize example
finished_src.append(__snake_case )
finished_tgt.append(__snake_case )
A , A : str = src, tgt
else: # can fit, keep adding
A , A : str = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(__snake_case )
finished_tgt.append(__snake_case )
return finished_src, finished_tgt
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : str = Path(__snake_case )
save_path.mkdir(exist_ok=__snake_case )
for split in ["train"]:
A , A : List[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
A : Optional[int] = [x.rstrip() for x in Path(__snake_case ).open().readlines()]
A : Tuple = [x.rstrip() for x in Path(__snake_case ).open().readlines()]
A , A : Union[str, Any] = pack_examples(__snake_case , __snake_case , __snake_case , __snake_case )
print(f"""packed {split} split from {len(__snake_case )} examples -> {len(__snake_case )}.""" )
Path(save_path / f"""{split}.source""" ).open("w" ).write("\n".join(__snake_case ) )
Path(save_path / f"""{split}.target""" ).open("w" ).write("\n".join(__snake_case ) )
for split in ["val", "test"]:
A , A : Any = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
shutil.copyfile(__snake_case , save_path / f"""{split}.source""" )
shutil.copyfile(__snake_case , save_path / f"""{split}.target""" )
def UpperCAmelCase ( ):
A : List[str] = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=__snake_case , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=__snake_case , default=128 )
parser.add_argument("--data_dir" , type=__snake_case )
parser.add_argument("--save_path" , type=__snake_case )
A : str = parser.parse_args()
A : Union[str, Any] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__snake_case , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
    packer_cli()
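# Example invocation (illustrative only; the script name, the paths and the chosen
# tokenizer are placeholders rather than values taken from the original script):
#
#   python pack_dataset.py \
#       --tok_name facebook/bart-large-cnn \
#       --max_seq_len 512 \
#       --data_dir ./wmt_en_ro \
#       --save_path ./wmt_en_ro_packed
#
# Only the train split is packed: source/target pairs are concatenated until either
# side would exceed --max_seq_len tokens, while the val and test files are copied
# verbatim.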
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict:
A : str = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
if not do_eval:
return
A : List[Any] = [log for log in logs if "eval_loss" in log.keys()]
A : Any = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A : List[str] = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A : Dict = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
A : List[str] = experiments[experiment_id]
A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
A : Union[str, Any] = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] )
A : Dict = len(re.findall(__lowerCamelCase , cl.err ) )
self.assertEqual(__lowerCamelCase , data["n_matches"] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : int = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , )
# Check metrics
A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : Dict = [log for log in logs if "eval_loss" in log.keys()]
A : Dict = eval_metrics[0]
A : int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
# test if do_predict saves generations and metrics
A : Optional[Any] = os.listdir(__lowerCamelCase )
A : Any = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]:
A : Optional[int] = "--skip_memory_metrics 0"
A : str = self.run_trainer(
max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
A : int = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig
A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A : int = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A : Tuple = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]:
A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
A : Optional[int] = self.get_auto_remove_tmp_dir()
A : int = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
A : Optional[Any] = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCamelCase )}
""".split()
A : Optional[Any] = "\n --do_predict\n ".split()
A : Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A : Dict = get_gpu_count()
A : Any = get_torch_dist_unique_port()
A : Optional[Any] = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
A : Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
else:
A : List[Any] = ["run_translation.py"] + args
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
main()
        return output_dir
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
__SCREAMING_SNAKE_CASE = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
# Initialise PyTorch model
A : int = XLNetConfig.from_json_file(lowerCAmelCase__ )
A : Dict = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
A : Optional[int] = finetuning_task
A : int = GLUE_TASKS_NUM_LABELS[finetuning_task]
A : str = XLNetForSequenceClassification(lowerCAmelCase__ )
elif "squad" in finetuning_task:
A : Dict = finetuning_task
A : Dict = XLNetForQuestionAnswering(lowerCAmelCase__ )
else:
A : Any = XLNetLMHeadModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
A : Tuple = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
A : int = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(f"""Save PyTorch model to {os.path.abspath(lowerCAmelCase__ )}""" )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(f"""Save configuration file to {os.path.abspath(lowerCAmelCase__ )}""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
) | 705 |
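# Example invocation (illustrative only; the script file name and the checkpoint,
# config and output paths are assumptions, not values from the original script):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-large-cased-pytorch \
#       --finetuning_task sts-b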
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    # Maximum sum over all non-empty subsequences (elements need not be adjacent):
    # at each step either restart at the current number, extend the best sum so far
    # with it, or keep the best sum unchanged.
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty" )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        ans = max(num , ans + num , ans )
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__SCREAMING_SNAKE_CASE = int(input("""Enter number of elements : """).strip())
__SCREAMING_SNAKE_CASE = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
    print(max_subsequence_sum(array))
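# Worked example (illustrative, not part of the original script): for
# nums = [1, 2, 3, 4, -2] the running best becomes 1, 3, 6, 10, 10, i.e. the sum of
# all positive elements; for an all-negative input such as [-2, -3, -1] the result
# is the largest single element, -1.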
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
) | 706 |
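# In practice the shim above means that, for example,
#   from diffusers.pipeline_utils import DiffusionPipeline
# still resolves (via the re-export at the top of this file) but triggers the
# deprecation warning; new code should import from diffusers.pipelines.pipeline_utils
# instead, as the warning message says.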
from math import sqrt
def solution(limit: int = 100_0000) -> int:
    # A path across the surface of an a x b x c cuboid (with c the largest side) is
    # shortest when the faces are unfolded flat, giving length
    # sqrt((a + b) ** 2 + c ** 2).  For each candidate largest side, count the
    # cuboids whose shortest path length is an integer, stopping once the running
    # total exceeds `limit`.
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
    print(F"""{solution() = }""")
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
A : Tuple = tempfile.mkdtemp()
A : Optional[int] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
A : Optional[Any] = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
"do_convert_rgb": True,
}
A : Tuple = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Dict ) -> Optional[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str , **__lowerCamelCase : Optional[int] ) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int , **__lowerCamelCase : List[Any] ) -> str:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
A : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Tuple = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : List[Any] = self.get_tokenizer()
A : int = self.get_rust_tokenizer()
A : Dict = self.get_image_processor()
A : Tuple = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
A : Optional[int] = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A : Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
A : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Union[str, Any] = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
A : Optional[Any] = self.get_image_processor(do_normalize=__lowerCamelCase )
A : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
A : List[str] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Tuple = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
A : List[Any] = self.prepare_image_inputs()
A : Tuple = image_processor(__lowerCamelCase , return_tensors="np" )
A : Optional[int] = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
A : int = self.get_image_processor()
A : Optional[int] = self.get_tokenizer()
A : Any = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
A : int = "Alexandra,T-shirt的价格是15便士。"
A : Tuple = processor(text=__lowerCamelCase )
A : Tuple = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
A : List[str] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Optional[int] = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
A : Optional[int] = "Alexandra,T-shirt的价格是15便士。"
A : List[str] = self.prepare_image_inputs()
A : List[str] = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A : Tuple = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Any = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
A : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Tuple = processor.batch_decode(__lowerCamelCase )
A : Any = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
A : Any = self.get_image_processor()
A : Union[str, Any] = self.get_tokenizer()
A : Tuple = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
A : Any = "Alexandra,T-shirt的价格是15便士。"
A : int = self.prepare_image_inputs()
A : Optional[int] = processor(text=__lowerCamelCase , images=__lowerCamelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__SCREAMING_SNAKE_CASE = """."""
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
with open(doctest_file_path) as fp:
for line in fp:
__SCREAMING_SNAKE_CASE = line.strip()
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
        raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.model'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
__SCREAMING_SNAKE_CASE = {
'google/rembert': 256,
}
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any=False , __lowerCamelCase : str=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : str="[CLS]" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : List[Any]="[UNK]" , __lowerCamelCase : Optional[int]="[SEP]" , __lowerCamelCase : Any="[PAD]" , __lowerCamelCase : Tuple="[CLS]" , __lowerCamelCase : List[str]="[MASK]" , **__lowerCamelCase : Tuple , ) -> str:
super().__init__(
do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A : Optional[Any] = do_lower_case
A : List[str] = remove_space
A : int = keep_accents
A : Optional[int] = vocab_file
A : Tuple = spm.SentencePieceProcessor()
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
A : Union[str, Any] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> Optional[Any]:
A : Tuple = self.__dict__.copy()
A : Optional[Any] = None
return state
def __setstate__( self : Union[str, Any] , __lowerCamelCase : List[str] ) -> Optional[Any]:
A : Tuple = d
A : Optional[int] = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=False ) -> Union[str, Any]:
A : int = self.sp_model.EncodeAsPieces(_SCREAMING_SNAKE_CASE )
return pieces
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Any ) -> Optional[int]:
return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int ) -> List[str]:
A : Union[str, Any] = self.sp_model.decode_pieces(_SCREAMING_SNAKE_CASE )
return out_string
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] = None ) -> List[int]:
A : int = [self.sep_token_id]
A : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Any = None , __lowerCamelCase : List[str] = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] = None ) -> List[int]:
A : Optional[int] = [self.sep_token_id]
A : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("Vocabulary path ({}) should be a directory".format(_SCREAMING_SNAKE_CASE ) )
return
A : Union[str, Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
        return (out_vocab_file,)
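# Hypothetical usage sketch (not part of the original module): in the transformers
# library this tokenizer is published as `RemBertTokenizer`.  Assuming the
# `google/rembert` checkpoint is reachable, a round trip could look roughly like:
#
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   ids = tokenizer("SentencePiece keeps casing and accents.")["input_ids"]
#   text = tokenizer.decode(ids)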
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str:
A : List[Any] = parent
A : Optional[int] = batch_size
A : Any = image_size
A : Optional[Any] = patch_size
A : Optional[Any] = num_channels
A : Tuple = is_training
A : Optional[Any] = use_labels
A : Union[str, Any] = hidden_size
A : Tuple = num_hidden_layers
A : Union[str, Any] = num_attention_heads
A : Union[str, Any] = intermediate_size
A : Any = hidden_act
A : Tuple = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Tuple = initializer_range
A : List[Any] = scope
A : Optional[int] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A : List[str] = (image_size // patch_size) ** 2
A : List[str] = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[Any] = None
if self.use_labels:
A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int:
A : Optional[int] = DeiTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any:
A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : List[str] = 1
A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict:
A : str = self.type_sequence_label_size
A : List[str] = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Any = 1
A : str = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Dict = self.prepare_config_and_inputs()
        A , A , A : Tuple = config_and_inputs
A : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
A : str = DeiTModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Any = [*signature.parameters.keys()]
A : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str:
A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
if not self.model_tester.is_training:
return
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A : Union[str, Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Dict = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A : Tuple = False
A : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A : List[str] = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
A : Tuple = problem_type["title"]
A : Optional[Any] = problem_type["num_labels"]
A : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
A : int = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
A : Optional[Any] = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : List[str] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : str = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
A : Dict = self.default_image_processor
A : Optional[int] = prepare_img()
A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" )
A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
            A : List[str] = model(__lowerCamelCase )
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def UpperCAmelCase ( _lowerCamelCase ):
A : int = 384
A : Optional[int] = 7
if "tiny" in model_name:
A : str = 96
A : str = (2, 2, 6, 2)
A : Any = (3, 6, 12, 24)
elif "small" in model_name:
A : Tuple = 96
A : str = (2, 2, 18, 2)
A : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
A : int = 128
A : Optional[Any] = (2, 2, 18, 2)
A : Dict = (4, 8, 16, 32)
A : List[str] = 12
A : List[str] = 512
elif "large" in model_name:
A : Any = 192
A : Optional[Any] = (2, 2, 18, 2)
A : str = (6, 12, 24, 48)
A : List[Any] = 12
A : List[Any] = 768
# set label information
A : List[Any] = 150
A : Dict = "huggingface/label-files"
A : Optional[int] = "ade20k-id2label.json"
A : Optional[Any] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
A : Any = {int(A_ ): v for k, v in idalabel.items()}
A : Dict = {v: k for k, v in idalabel.items()}
A : Union[str, Any] = SwinConfig(
embed_dim=A_ , depths=A_ , num_heads=A_ , window_size=A_ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
A : Any = UperNetConfig(
        backbone_config=A_ , auxiliary_in_channels=A_ , num_labels=A_ , id2label=A_ , label2id=A_ , )
return config
def UpperCAmelCase ( _lowerCamelCase ):
A : Tuple = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : str = dct.pop(A_ )
A : Union[str, Any] = val
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A : Tuple = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
A : Tuple = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A : Dict = in_proj_weight[:dim, :]
A : List[str] = in_proj_bias[: dim]
A : Any = in_proj_weight[
dim : dim * 2, :
]
A : List[Any] = in_proj_bias[
dim : dim * 2
]
A : Dict = in_proj_weight[
-dim :, :
]
A : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def UpperCAmelCase ( _lowerCamelCase ):
A , A : int = x.shape
A : Union[str, Any] = x.reshape(A_ , 4 , in_channel // 4 )
A : List[Any] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(A_ , A_ )
return x
def UpperCAmelCase ( _lowerCamelCase ):
A , A : Union[str, Any] = x.shape
A : List[Any] = x.reshape(A_ , in_channel // 4 , 4 )
A : List[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(A_ , A_ )
return x
def UpperCAmelCase ( _lowerCamelCase ):
A : int = x.shape[0]
A : List[str] = x.reshape(4 , in_channel // 4 )
A : Union[str, Any] = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(A_ )
return x
def UpperCAmelCase ( _lowerCamelCase ):
A : List[str] = x.shape[0]
A : List[str] = x.reshape(in_channel // 4 , 4 )
A : List[str] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(A_ )
return x
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : List[Any] = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
A : str = model_name_to_url[model_name]
A : Optional[int] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" , file_name=A_ )[
"state_dict"
]
for name, param in state_dict.items():
print(A_ , param.shape )
A : Any = get_upernet_config(A_ )
A : Optional[int] = UperNetForSemanticSegmentation(A_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A : Any = state_dict.pop(A_ )
if "bn" in key:
A : Optional[int] = key.replace("bn" , "batch_norm" )
A : Optional[Any] = val
# rename keys
A : Any = create_rename_keys(A_ )
for src, dest in rename_keys:
rename_key(A_ , A_ , A_ )
read_in_q_k_v(A_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
A : Any = reverse_correct_unfold_reduction_order(A_ )
if "norm" in key:
A : int = reverse_correct_unfold_norm_order(A_ )
model.load_state_dict(A_ )
# verify on image
A : Optional[Any] = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
A : Any = Image.open(requests.get(A_ , stream=A_ ).raw ).convert("RGB" )
A : List[str] = SegformerImageProcessor()
A : int = processor(A_ , return_tensors="pt" ).pixel_values
with torch.no_grad():
A : Dict = model(A_ )
A : Tuple = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
A : Union[str, Any] = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
elif model_name == "upernet-swin-small":
A : int = torch.tensor(
[[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
elif model_name == "upernet-swin-base":
A : List[str] = torch.tensor(
[[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
elif model_name == "upernet-swin-large":
A : Union[str, Any] = torch.tensor(
[[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A_ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(A_ )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
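# Hedged usage sketch (not part of the original script): once a checkpoint has been
# converted and pushed, it can be reloaded through the standard `from_pretrained` API.
# The repo id "openmmlab/upernet-swin-tiny" is an assumption based on the push_to_hub
# calls above; any converted checkpoint path works the same way.
def _example_run_converted_upernet():
    import requests
    import torch
    from PIL import Image
    from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation
    processor = SegformerImageProcessor()
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        logits = model(pixel_values).logits  # (batch_size, num_labels, height, width)
    return logits.argmax(dim=1)  # per-pixel predicted class ids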
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__SCREAMING_SNAKE_CASE = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__SCREAMING_SNAKE_CASE = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]:
A : str = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
        return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
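# Hedged illustration (not part of the metric class above): the docstring formula
# Recall = TP / (TP + FN) can be reproduced for the binary case without sklearn.
def _binary_recall(references, predictions, pos_label=1):
    true_positives = sum(1 for ref, pred in zip(references, predictions) if ref == pos_label and pred == pos_label)
    false_negatives = sum(1 for ref, pred in zip(references, predictions) if ref == pos_label and pred != pos_label)
    denominator = true_positives + false_negatives
    return true_positives / denominator if denominator else 0.0
# Matches Example 1 above: recall for references [0, 0, 1, 1, 1] and predictions [0, 1, 0, 1, 1] is 2/3.
assert abs(_binary_recall([0, 0, 1, 1, 1], [0, 1, 0, 1, 1]) - 2 / 3) < 1e-9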
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE = TypeVar("""T""")
class lowerCamelCase_ ( Generic[T] ):
'''simple docstring'''
a__ = 42 # Cache store of keys
a__ = 42 # References of the keys in cache
a__ = 10 # Maximum capacity of cache
def __init__( self : Union[str, Any] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
A : str = deque()
A : List[str] = set()
if not n:
A : Any = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
A : int = n
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> Dict:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
A : Union[str, Any] = self.dq_store.pop()
self.key_reference.remove(_A )
else:
self.dq_store.remove(_A )
self.dq_store.appendleft(_A )
self.key_reference.add(_A )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple:
for k in self.dq_store:
print(_A )
def __repr__( self : int ) -> Any:
return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
from collections import deque
from .hash_table import HashTable
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
A : Dict = self.values[key]
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
        return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def UpperCAmelCase ( ):
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def UpperCAmelCase ( _lowerCamelCase ):
print("Generating prime p..." )
A : str = rabinMiller.generate_large_prime(__snake_case )
print("Generating prime q..." )
A : List[Any] = rabinMiller.generate_large_prime(__snake_case )
A : List[Any] = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
A : Optional[int] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(__snake_case , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
A : Dict = cryptoMath.find_mod_inverse(__snake_case , (p - 1) * (q - 1) )
A : Dict = (n, e)
A : str = (n, d)
return (public_key, private_key)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
A , A : int = generate_key(__snake_case )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
    main()
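# Hedged illustration (not part of the key-generation module above): textbook RSA
# encryption/decryption with an (n, e) / (n, d) pair is plain modular exponentiation.
# The toy key below (p=61, q=53, n=3233, e=17, d=2753) is far too small for real use.
def _rsa_roundtrip_demo():
    n, e = 3233, 17  # public key
    d = 2753  # private exponent
    message = 65
    ciphertext = pow(message, e, n)
    assert pow(ciphertext, d, n) == message  # decryption recovers the plaintext
    return ciphertext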
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict:
A : Optional[Any] = 4
A : List[str] = 32
A : Any = (32, 32)
A : str = torch.manual_seed(0 )
A : int = torch.device(__lowerCamelCase )
A : List[str] = (batch_size, num_channels) + sizes
A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase )
A : int = {"hidden_states": hidden_states}
if include_temb:
A : Any = 1_28
A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase )
if include_res_hidden_states_tuple:
A : str = torch.manual_seed(1 )
A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),)
if include_encoder_hidden_states:
A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase )
if include_skip_sample:
A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : Dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 1_28,
}
if self.block_type == "up":
A : Dict = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A : str = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A , A : str = self.prepare_init_args_and_inputs_for_common()
A : List[Any] = self.block_class(**__lowerCamelCase )
unet_block.to(__lowerCamelCase )
unet_block.eval()
with torch.no_grad():
A : int = unet_block(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Union[str, Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
A : Any = output[0, -1, -3:, -3:]
A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase )
assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.prepare_init_args_and_inputs_for_common()
A : str = self.block_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Optional[int] = model(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Optional[Any] = output[0]
A : List[str] = torch.device(__lowerCamelCase )
A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase )
A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase )
        loss.backward()
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase_ ( lowercase_ ):
'''simple docstring'''
a__ = ["image_processor", "tokenizer"]
a__ = "ViltImageProcessor"
a__ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : List[str] , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : List[str] ) -> Optional[Any]:
A : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase_ , )
A : Union[str, Any] = kwargs.pop("feature_extractor" )
A : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
A : Dict = self.image_processor
def __call__( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : Union[str, Any] , ) -> int:
A : List[str] = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
# add pixel_values + pixel_mask
A : Dict = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ )
encoding.update(lowerCamelCase_ )
return encoding
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[int] ) -> Dict:
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : str , *__lowerCamelCase : int , **__lowerCamelCase : Any ) -> str:
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
A : List[Any] = self.tokenizer.model_input_names
A : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase_ , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase_ , )
return self.image_processor
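# Hedged usage sketch (the "dandelin/vilt-b32-finetuned-vqa" checkpoint is an assumption
# for illustration): the processor combines pixel features and tokenized text into one batch.
def _example_vilt_processing():
    import requests
    from PIL import Image
    from transformers import ViltProcessor
    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    encoding = processor(image, "How many cats are there?", return_tensors="pt")
    return sorted(encoding.keys())  # input_ids, attention_mask, pixel_values, pixel_mask, ...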
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]:
A : str = max_length
A : Optional[int] = max_position_embeddings
@add_start_docstrings(__lowerCamelCase )
def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool:
A : List[Any] = input_ids.shape[-1]
A : Any = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]:
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , __lowerCamelCase , )
A : str = start_length
A : Optional[Any] = max_new_tokens
A : Dict = start_length + max_new_tokens
@add_start_docstrings(__lowerCamelCase )
def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return input_ids.shape[-1] >= self.max_length
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]:
A : str = max_time
A : Dict = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool:
return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
return None
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[int] = stopping_criteria.max_length
A : Any = deepcopy(_lowerCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) )
    return new_stopping_criteria
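# Hedged usage sketch: stopping criteria are normally bundled in a StoppingCriteriaList and
# handed to `generate`. The "gpt2" checkpoint and prompt below are assumptions for illustration.
def _example_generate_with_stopping_criteria():
    from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer("The quick brown fox", return_tensors="pt")
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
    return model.generate(**inputs, stopping_criteria=criteria, do_sample=False)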
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowerCamelCase_ ( __lowerCamelCase ):
'''simple docstring'''
a__ = 'big_bird'
def __init__( self : Tuple , __lowerCamelCase : str=5_03_58 , __lowerCamelCase : List[Any]=7_68 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : Optional[Any]=30_72 , __lowerCamelCase : Union[str, Any]="gelu_new" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : str=40_96 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=0 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Tuple=66 , __lowerCamelCase : Union[str, Any]="block_sparse" , __lowerCamelCase : Any=True , __lowerCamelCase : Any=False , __lowerCamelCase : Any=64 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : Tuple , ) -> Tuple:
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , sep_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
A : Optional[int] = vocab_size
A : Any = max_position_embeddings
A : Optional[int] = hidden_size
A : List[Any] = num_hidden_layers
A : Optional[Any] = num_attention_heads
A : Union[str, Any] = intermediate_size
A : Any = hidden_act
A : Optional[Any] = hidden_dropout_prob
A : str = attention_probs_dropout_prob
A : Union[str, Any] = initializer_range
A : List[str] = type_vocab_size
A : Tuple = layer_norm_eps
A : Optional[int] = use_cache
A : List[Any] = rescale_embeddings
A : Any = attention_type
A : Tuple = use_bias
A : Union[str, Any] = block_size
A : Tuple = num_random_blocks
A : Union[str, Any] = classifier_dropout
class lowerCamelCase_ ( __lowerCamelCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
if self.task == "multiple-choice":
A : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
A : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
            ] )
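# Hedged usage sketch: a configuration like the one above is usually used to build a randomly
# initialised model; the small sizes below are assumptions chosen only to keep the example light.
def _example_big_bird_config():
    from transformers import BigBirdConfig, BigBirdModel
    config = BigBirdConfig(
        hidden_size=128,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=256,
        attention_type="block_sparse",
        block_size=64,
        num_random_blocks=3,
    )
    model = BigBirdModel(config)  # randomly initialised weights
    return model.config.attention_type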
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ):
A : str = symbols(_lowerCamelCase )
A : int = lambdify(_lowerCamelCase , _lowerCamelCase )
A : List[str] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) )
A : Optional[int] = starting_point
while True:
if diff_function(_lowerCamelCase ) != 0:
A : Optional[Any] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function(
_lowerCamelCase )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
A : int = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
    # Starting at 0 would make the derivative -sin(0) = 0 and raise ZeroDivisionError, so start at 2.
    print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 2)}""")
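    # Hedged extra example (not in the original script): the square root of 2 is the positive
    # root of x**2 - 2 = 0, so it can be found with the same helper.
    print(F"""The root of x**2 - 2 = 0 is {newton_raphson('x**2 - 2', 1)}""")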
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCamelCase_ ( _UpperCamelCase ):
'''simple docstring'''
a__ = "informer"
a__ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : int , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "student_t" , __lowerCamelCase : str = "nll" , __lowerCamelCase : int = 1 , __lowerCamelCase : List[int] = None , __lowerCamelCase : Optional[Union[str, bool]] = "mean" , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : bool = True , __lowerCamelCase : str = "gelu" , __lowerCamelCase : float = 0.05 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : int = 1_00 , __lowerCamelCase : float = 0.02 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str = "prob" , __lowerCamelCase : int = 5 , __lowerCamelCase : bool = True , **__lowerCamelCase : List[Any] , ) -> Union[str, Any]:
# time series specific configuration
A : Any = prediction_length
A : List[str] = context_length or prediction_length
A : Tuple = distribution_output
A : Union[str, Any] = loss
A : Union[str, Any] = input_size
A : List[str] = num_time_features
A : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A : List[str] = scaling
A : str = num_dynamic_real_features
A : Tuple = num_static_real_features
A : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
A : Dict = cardinality
else:
A : Dict = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
A : Union[str, Any] = embedding_dimension
else:
A : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A : Dict = num_parallel_samples
# Transformer architecture configuration
A : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
A : Optional[Any] = d_model
A : int = encoder_attention_heads
A : Tuple = decoder_attention_heads
A : List[Any] = encoder_ffn_dim
A : List[str] = decoder_ffn_dim
A : List[str] = encoder_layers
A : Tuple = decoder_layers
A : Union[str, Any] = dropout
A : List[Any] = attention_dropout
A : str = activation_dropout
A : int = encoder_layerdrop
A : Union[str, Any] = decoder_layerdrop
A : Tuple = activation_function
A : str = init_std
A : Tuple = use_cache
# Informer
A : Union[str, Any] = attention_type
A : Union[str, Any] = sampling_factor
A : Tuple = distil
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
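# Hedged usage sketch (assuming a transformers version that ships the Informer model classes):
# a config like the one above is typically paired with InformerModel or InformerForPrediction.
def _example_informer_config():
    from transformers import InformerConfig, InformerModel
    config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
    model = InformerModel(config)  # randomly initialised
    return model.config.prediction_length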
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__SCREAMING_SNAKE_CASE = {
"""allenai/led-base-16384""": 16384,
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = LEDTokenizer
a__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]:
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
A : Any = add_prefix_space
A : Tuple = pre_tok_class(**__lowerCamelCase )
A : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A : List[str] = "post_processor"
A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
A : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A : Union[str, Any] = tuple(state["sep"] )
if "cls" in state:
A : str = tuple(state["cls"] )
A : int = False
if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : List[Any] = add_prefix_space
A : Dict = True
if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets:
A : Dict = trim_offsets
A : str = True
if changes_to_apply:
A : int = getattr(__lowerCamelCase , state.pop("type" ) )
A : Dict = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict:
A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value
A : Tuple = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]:
A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
A : str = [self.sep_token_id]
A : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict:
A : Dict = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
A : List[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A : Optional[int] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
A : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
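# Hedged usage sketch (assuming the "allenai/led-base-16384" checkpoint): LED batches usually
# carry a `global_attention_mask`, which is why the padding override above pads it with -1.
def _example_led_global_attention():
    import torch
    from transformers import LEDTokenizerFast
    tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    batch = tokenizer(["A long document to summarize."], return_tensors="pt", padding=True)
    global_attention_mask = torch.zeros_like(batch["input_ids"])
    global_attention_mask[:, 0] = 1  # give global attention to the first (<s>) token
    batch["global_attention_mask"] = global_attention_mask
    return batch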
'''simple docstring'''
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple ) -> Optional[Any]:
A : str = {}
def SCREAMING_SNAKE_CASE__ ( self : int ) -> None:
print(self.vertex )
for i in self.vertex:
print(lowercase_ , " -> " , " -> ".join([str(lowercase_ ) for j in self.vertex[i]] ) )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : int ) -> None:
if from_vertex in self.vertex:
self.vertex[from_vertex].append(lowercase_ )
else:
# else make a new vertex
A : Optional[Any] = [to_vertex]
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> None:
A : Dict = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : list ) -> None:
A : List[str] = True
print(lowercase_ , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(lowercase_ , lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
    # 0 1 2 3
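# Hedged companion sketch (not in the original class): the same traversal can be written
# iteratively with an explicit stack, which avoids recursion-depth limits on deep graphs.
def _dfs_iterative(adjacency: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        stack.extend(reversed(adjacency.get(node, [])))  # reverse keeps insertion order
    return order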
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_A )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} )
a__ = Features({"question": Value("string" ), "context": Value("string" )} )
a__ = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
a__ = "question"
a__ = "context"
a__ = "answers"
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
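# Hedged illustration: a dataset row compatible with the task template above is SQuAD-style,
# with `answers` holding parallel `text` / `answer_start` lists; the values below are made up.
_EXAMPLE_QA_CONTEXT = "The Eiffel Tower is located in Paris."
_EXAMPLE_QA_ROW = {
    "question": "Where is the Eiffel Tower?",
    "context": _EXAMPLE_QA_CONTEXT,
    "answers": {"text": ["Paris"], "answer_start": [_EXAMPLE_QA_CONTEXT.index("Paris")]},
}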
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[int] = filter(lambda _lowerCamelCase : p.requires_grad , model.parameters() )
A : Dict = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
if metric == "rouge2":
A : Tuple = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
A : str = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
A : int = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
A : List[str] = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"""
" function." )
A : Union[str, Any] = ModelCheckpoint(
dirpath=snake_case__ , filename=snake_case__ , monitor=f"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
return EarlyStopping(
monitor=f"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=snake_case__ , verbose=snake_case__ , )
class lowerCamelCase_ ( pl.Callback ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ) -> Tuple:
A : List[Any] = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase_ )
@rank_zero_only
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=True ) -> None:
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
A : List[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
A : Union[str, Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
A : Dict = od / "test_results.txt"
A : Dict = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A : List[str] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
A : Dict = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_ , "a+" ) as writer:
for key in sorted(lowerCAmelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
A : Optional[Any] = metrics[key]
if isinstance(lowerCAmelCase_ , torch.Tensor ):
A : Tuple = val.item()
A : List[str] = F"""{key}: {val:.6f}\n"""
writer.write(lowerCAmelCase_ )
if not save_generations:
return
if "preds" in metrics:
A : Optional[int] = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(lowerCAmelCase_ )
@rank_zero_only
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple ) -> List[Any]:
try:
A : Optional[int] = pl_module.model.model.num_parameters()
except AttributeError:
A : Dict = pl_module.model.num_parameters()
A : Optional[int] = count_trainable_parameters(lowerCAmelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , "test" )
@rank_zero_only
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> Dict:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
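# Hedged illustration of the trainable-parameter count used above, on a tiny torch model;
# the Linear layer is an arbitrary stand-in for `pl_module.model`.
def _example_count_trainable_parameters():
    model = torch.nn.Linear(10, 2)
    model.bias.requires_grad = False  # frozen parameters are excluded from the count
    return int(sum(np.prod(p.size()) for p in model.parameters() if p.requires_grad))  # 10 * 2 = 20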
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int:
A : Optional[int] = parent
A : List[str] = batch_size
A : Tuple = image_size
A : List[str] = num_channels
A : List[str] = embeddings_size
A : List[str] = hidden_sizes
A : str = depths
A : Optional[Any] = is_training
A : int = use_labels
A : Optional[int] = hidden_act
A : List[Any] = num_labels
A : List[str] = scope
A : str = len(__lowerCamelCase )
A : Optional[int] = out_features
A : str = out_indices
A : Optional[int] = num_groups
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[int] = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
A : Any = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple:
A : Union[str, Any] = self.num_labels
A : List[str] = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : str = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> List[Any]:
A : Dict = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[Any] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A : Optional[Any] = None
A : Optional[int] = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
A : List[str] = self.prepare_config_and_inputs()
A , A , A : Tuple = config_and_inputs
A : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : Any = BitModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason="Bit does not output attentions" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
A : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
A : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : Union[str, Any] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : Union[str, Any] = BitModelTester(self ) | 17 | 0 |
'''simple docstring'''
def cocktail_shaker_sort( unsorted ):
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
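# Quick illustrative check (input values are an assumed example, not from the original source):
# >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
# [1, 2, 2, 4, 5]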
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(F"""{cocktail_shaker_sort(unsorted) = }""") | 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
A : List[str] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
A : List[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) ) | 17 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Union[str, Any] = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
A : int = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
A : List[Any] = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=_lowerCamelCase , output_all_encodings=_lowerCamelCase , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , _lowerCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
A : List[str] = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
A : Tuple = os.path.join(get_home_dir() , "models" )
A : Tuple = _load_vocab(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , cls=_lowerCamelCase )
A : Any = nlp.model.BERTModel(
_lowerCamelCase , len(_lowerCamelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=_lowerCamelCase , use_token_type_embed=_lowerCamelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=_lowerCamelCase , use_decoder=_lowerCamelCase , )
original_bort.load_parameters(_lowerCamelCase , cast_dtype=_lowerCamelCase , ignore_extra=_lowerCamelCase )
A : str = original_bort._collect_params_with_prefix()
# Build our config 🤗
A : Union[str, Any] = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(_lowerCamelCase ),
}
A : str = BertConfig.from_dict(_lowerCamelCase )
A : List[str] = BertForMaskedLM(_lowerCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
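    # For instance (illustrative, mirroring the calls below): layer 0's key-projection bias is copied via
    # check_and_map_params(self_attn.key.bias.data, "encoder.transformer_cells.0.attention_cell.proj_key.bias")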
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_lowerCamelCase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_lowerCamelCase , _lowerCamelCase ):
A : Optional[int] = hf_param.shape
A : int = to_torch(params[gluon_param] )
A : List[str] = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
A : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
A : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
A : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
A : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
A : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
A : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
A : BertSelfAttention = layer.attention.self
A : Any = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
A : Union[str, Any] = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
A : str = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
A : Union[str, Any] = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
A : Tuple = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
A : Optional[int] = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
A : BertSelfOutput = layer.attention.output
A : int = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
A : Union[str, Any] = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
A : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
A : Tuple = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
A : BertIntermediate = layer.intermediate
A : Union[str, Any] = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
A : List[Any] = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
A : BertOutput = layer.output
A : List[Any] = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
A : List[Any] = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
A : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
A : Union[str, Any] = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
A : Dict = RobertaTokenizer.from_pretrained("roberta-base" )
A : Tuple = tokenizer.encode_plus(_lowerCamelCase )["input_ids"]
# Get gluon output
A : Optional[Any] = mx.nd.array([input_ids] )
A : Any = original_bort(inputs=_lowerCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_lowerCamelCase )
A : List[Any] = BertModel.from_pretrained(_lowerCamelCase )
hf_bort_model.eval()
A : Optional[int] = tokenizer.encode_plus(_lowerCamelCase , return_tensors="pt" )
A : Tuple = hf_bort_model(**_lowerCamelCase )[0]
A : Tuple = output_gluon[0].asnumpy()
A : Optional[Any] = output_hf[0].detach().numpy()
A : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item()
A : Optional[int] = np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , _lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path) | 718 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str:
A : Optional[Any] = parent
A : Optional[int] = batch_size
A : List[str] = image_size
A : List[str] = num_channels
A : Tuple = embeddings_size
A : Optional[int] = hidden_sizes
A : Dict = depths
A : Optional[int] = is_training
A : List[str] = use_labels
A : List[Any] = hidden_act
A : Optional[int] = num_labels
A : int = scope
A : List[Any] = len(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : Any = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
A : List[str] = TFRegNetModel(config=__lowerCamelCase )
A : str = model(__lowerCamelCase , training=__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]:
A : List[Any] = self.num_labels
A : int = TFRegNetForImageClassification(__lowerCamelCase )
A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
A : Any = self.prepare_config_and_inputs()
A , A , A : str = config_and_inputs
A : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a__ = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Optional[Any] = TFRegNetModelTester(self )
A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
A : int = model_class(__lowerCamelCase )
A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase )
A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : List[str] = layer_type
A : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ):
A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase )
A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple()
def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ):
if isinstance(__lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ):
recursive_check(__lowerCamelCase , __lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase )
for model_class in self.all_model_classes:
A : Tuple = model_class(__lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A : Optional[int] = self.default_image_processor
A : List[Any] = prepare_img()
A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# forward pass
A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase )
# verify the logits
A : Dict = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) | 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 719 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = (PNDMScheduler,)
a__ = (("num_inference_steps", 50),)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]:
A : Union[str, Any] = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__lowerCamelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple:
A : Dict = dict(self.forward_default_kwargs )
A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : Union[str, Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Any = self.get_scheduler_config(**__lowerCamelCase )
A : int = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : Dict = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str:
A : List[str] = dict(self.forward_default_kwargs )
A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : List[str] = self.dummy_sample
A : Any = 0.1 * sample
A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Tuple = self.get_scheduler_config()
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A : Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : str = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[:]
A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]:
A : Optional[Any] = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(**__lowerCamelCase )
A : str = scheduler_class(**__lowerCamelCase )
A : List[str] = 10
A : Union[str, Any] = self.dummy_model()
A : int = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A : Tuple = model(__lowerCamelCase , __lowerCamelCase )
A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
A : Union[str, Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
A : List[Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__lowerCamelCase )
elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ):
A : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
A : str = 27
for scheduler_class in self.scheduler_classes:
A : Tuple = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
with self.assertRaises(__lowerCamelCase ):
A : Union[str, Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : Optional[Any] = self.full_loop()
A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
A : Any = self.full_loop(prediction_type="v_prediction" )
A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
A : Dict = torch.sum(torch.abs(__lowerCamelCase ) )
A : Any = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3 | 17 | 0 |
def solution( n : int = 100 ) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
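# Worked example (illustrative): for n = 10 the squared sum is 55**2 = 3025 and the sum of the
# squares is 385, so solution(10) == 2640.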
if __name__ == "__main__":
print(F"""{solution() = }""") | 720 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force( force : float , area : float , distance : float ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
        distance = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
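# Illustrative call (numbers are assumed, not from the original module): with force=0, the given plate
# area and separation are plugged into F = (pi**2 * ħ * c * A) / (240 * d**4), e.g.
# casimir_force(force=0, area=0.05, distance=5e-7) returns {"force": <computed value in newtons>}.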
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 721 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained(
_lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) )
A : Union[str, Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A : Any = tensor_value
A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
# convert tokenizer
A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 17 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["""small""", """medium""", """large"""]
OLD_KEY = """lm_head.decoder.weight"""
NEW_KEY = """lm_head.weight"""
def convert_dialogpt_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : str ):
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
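# The conversion is only a state-dict key rename so the checkpoint matches the parameter name
# Transformers GPT-2 models expect; schematically (illustrative):
# {"lm_head.decoder.weight": w}  ->  {"lm_head.weight": w}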
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
) | 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 17 | 0 |
from __future__ import annotations
def depth_first_search( possible_board , diagonal_right_collisions , diagonal_left_collisions , boards , n , ):
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n : int ) -> None:
    boards : list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
            print(column )
print("" )
    print(len(boards ) , "solutions were found." )
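# Sanity note (illustrative, standard N-queens counts): n_queens_solution(4) prints 2 boards and
# n_queens_solution(8) prints the classic 92 solutions.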
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4) | 701 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for a, b in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Union[str, Any] = None
ops.enable_eager_execution_internal()
A : Tuple = tf.config.list_physical_devices("CPU" )
if len(__lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
A : Dict = tf.config.list_logical_devices(device_type="CPU" )
A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
A : Optional[int] = GradientAccumulator()
A : Tuple = tf.Variable([4.0, 3.0] )
A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 )
A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase )
def accumulate_on_replica(__lowerCamelCase : Tuple ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
with strategy.scope():
A : int = strategy.experimental_local_results(__lowerCamelCase )
local_variables[0].assign(__lowerCamelCase )
local_variables[1].assign(__lowerCamelCase )
strategy.run(__lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowerCamelCase )
def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] ) | 17 | 0 |
from maths.prime_check import is_prime
def UpperCAmelCase ( _lowerCamelCase ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A : Optional[Any] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if is_prime(UpperCamelCase__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod() | 702 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 17 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def UpperCAmelCase ( _lowerCamelCase ):
A : Dict = SwinConfig(image_size=192 )
if "base" in model_name:
A : Dict = 6
A : Optional[Any] = 128
A : str = (2, 2, 18, 2)
A : Dict = (4, 8, 16, 32)
elif "large" in model_name:
A : str = 12
A : Optional[int] = 192
A : Any = (2, 2, 18, 2)
A : int = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
A : Any = window_size
A : Any = embed_dim
A : Dict = depths
A : Union[str, Any] = num_heads
return config
def UpperCAmelCase ( _lowerCamelCase ):
if "encoder.mask_token" in name:
A : int = name.replace("encoder.mask_token" , "embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
A : List[str] = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
A : Tuple = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
if "attn.proj" in name:
A : str = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
A : List[str] = name.replace("attn" , "attention.self" )
if "norm1" in name:
A : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A : List[Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A : Optional[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A : int = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
A : List[Any] = "layernorm.weight"
if name == "encoder.norm.bias":
A : Optional[Any] = "layernorm.bias"
if "decoder" in name:
pass
else:
A : str = "swin." + name
return name
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
for key in orig_state_dict.copy().keys():
A : Optional[int] = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
if "attn_mask" in key:
pass
elif "qkv" in key:
A : str = key.split("." )
A : int = int(key_split[2] )
A : str = int(key_split[4] )
A : int = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A : Optional[Any] = val[:dim, :]
A : Any = val[
dim : dim * 2, :
]
A : List[str] = val[-dim:, :]
else:
A : Tuple = val[
:dim
]
A : int = val[
dim : dim * 2
]
A : Any = val[
-dim:
]
else:
A : int = val
return orig_state_dict
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Union[str, Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
A : Any = get_swin_config(__SCREAMING_SNAKE_CASE )
A : Optional[int] = SwinForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.eval()
A : Tuple = convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
A : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
A : Optional[int] = ViTImageProcessor(size={"height": 192, "width": 192} )
A : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
A : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" )
with torch.no_grad():
A : Tuple = model(**__SCREAMING_SNAKE_CASE ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
print(f"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(f"""microsoft/{model_name}""" )
image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 703 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__SCREAMING_SNAKE_CASE = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase ( ):
A : Union[str, Any] = "https://pypi.org/pypi/diffusers/json"
A : List[Any] = json.loads(request.urlopen(_lowerCamelCase ).read() )["releases"].keys()
return sorted(_lowerCamelCase , key=lambda _lowerCamelCase : version.Version(_lowerCamelCase ) )
def UpperCAmelCase ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
A : List[Any] = Path(_lowerCamelCase ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( _lowerCamelCase ):
init_hf_modules()
A : Tuple = Path(_lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
A : Optional[int] = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Union[str, Any] = f.read()
# Imports of the form `import .xxx`
A : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(_lowerCamelCase ) )
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[int] = False
A : Tuple = [module_file]
A : Optional[int] = []
# Let's recurse through all relative imports
while not no_change:
A : Optional[Any] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowerCamelCase ) )
A : Optional[Any] = Path(_lowerCamelCase ).parent
A : List[str] = [str(module_path / m ) for m in new_imports]
A : Optional[Any] = [f for f in new_import_files if f not in all_relative_imports]
A : Union[str, Any] = [f"""{f}.py""" for f in new_import_files]
A : Tuple = len(_lowerCamelCase ) == 0
all_relative_imports.extend(_lowerCamelCase )
return all_relative_imports
def UpperCAmelCase ( _lowerCamelCase ):
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
A : Dict = f.read()
# Imports of the form `import xxx`
A : List[str] = re.findall("^\s*import\s+(\S+)\s*$" , _lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , _lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
A : Optional[int] = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
A : Any = list(set(_lowerCamelCase ) )
A : Tuple = []
for imp in imports:
try:
importlib.import_module(_lowerCamelCase )
except ImportError:
missing_packages.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
f"""{", ".join(_lowerCamelCase )}. Run `pip install {" ".join(_lowerCamelCase )}`""" )
return get_relative_imports(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : int = module_path.replace(os.path.sep , "." )
A : Optional[Any] = importlib.import_module(_lowerCamelCase )
if class_name is None:
return find_pipeline_class(_lowerCamelCase )
return getattr(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase ):
from ..pipelines import DiffusionPipeline
A : int = dict(inspect.getmembers(_lowerCamelCase , inspect.isclass ) )
A : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowerCamelCase )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
A : Any = cls
return pipeline_class
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , ):
A : List[Any] = str(_lowerCamelCase )
A : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
A : Union[str, Any] = module_file_or_url
A : Any = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
A : Optional[Any] = get_diffusers_versions()
# cut ".dev0"
A : Union[str, Any] = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
A : List[Any] = latest_version if latest_version[1:] in available_versions else "main"
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
A : Optional[Any] = f"""v{revision}"""
elif revision == "main":
A : Dict = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
A : Dict = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
A : Optional[int] = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = "git"
A : Any = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
A : Any = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
A : Optional[Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
A : List[str] = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
A : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
A : Optional[int] = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
A : int = f"""{module_needed}.py"""
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = use_auth_token
elif use_auth_token is True:
A : Dict = HfFolder.get_token()
else:
A : Tuple = None
A : List[str] = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
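        # For illustration only (hypothetical user/repo names), a hub module then ends up cached under
        #   <HF_MODULES_CACHE>/<DIFFUSERS_DYNAMIC_MODULE_NAME>/local/<user>--<repo>/<commit_hash>/<module_file>
        # so each commit hash keeps its own copy of the module file and of its relative imports.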
A : str = submodule_path / commit_hash
A : List[str] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
        # Make sure we also have every file the module needs via relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , f"""{module_needed}.py""" , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
A : int = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return get_class_in_module(_lowerCamelCase , final_module.replace(".py" , "" ) ) | 17 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowerCamelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
a__ = field(default="summarization" ,metadata={"include_in_asdict_even_if_is_default": True} )
a__ = Features({"text": Value("string" )} )
a__ = Features({"summary": Value("string" )} )
a__ = "text"
a__ = "summary"
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"} | 704 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , ) -> Dict:
A : str = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
A : Dict = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
if not do_eval:
return
A : List[Any] = [log for log in logs if "eval_loss" in log.keys()]
A : Any = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A : List[str] = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : List[str] ) -> Tuple:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A : Dict = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
A : List[str] = experiments[experiment_id]
A : Union[str, Any] = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
A : Union[str, Any] = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCamelCase , extra_args_str=data["extra_args_str"] )
A : Dict = len(re.findall(__lowerCamelCase , cl.err ) )
self.assertEqual(__lowerCamelCase , data["n_matches"] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
A : int = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCamelCase , )
# Check metrics
A : str = TrainerState.load_from_json(os.path.join(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : Dict = [log for log in logs if "eval_loss" in log.keys()]
A : Dict = eval_metrics[0]
A : int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , __lowerCamelCase )
# test if do_predict saves generations and metrics
A : Optional[Any] = os.listdir(__lowerCamelCase )
A : Any = {os.path.basename(__lowerCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCamelCase : str ) -> Tuple[int, float]:
A : Optional[int] = "--skip_memory_metrics 0"
A : str = self.run_trainer(
max_len=1_28 , model_name=__lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCamelCase , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
A : Union[str, Any] = TrainerState.load_from_json(Path(__lowerCamelCase , "trainer_state.json" ) ).log_history
A : str = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
A : List[Any] = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
A : int = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A : int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig
A : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A : int = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are in `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
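        # Back-of-envelope (illustrative figures only): 25M quantized params * (8 - 2) bytes/param ≈ 150MB saved,
        # so asserting on 120MB rather than the full 150MB leaves roughly 30MB of slack for per-gpu variation.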
A : Tuple = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__lowerCamelCase , __lowerCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__lowerCamelCase , __lowerCamelCase , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : float = 3e-3 , __lowerCamelCase : str = "adafactor" , __lowerCamelCase : bool = False , __lowerCamelCase : str = None , __lowerCamelCase : int = 0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = None , ) -> List[str]:
A : Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
A : Optional[int] = self.get_auto_remove_tmp_dir()
A : int = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
A : Optional[Any] = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCamelCase )}
""".split()
A : Optional[Any] = "\n --do_predict\n ".split()
A : Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A : Dict = get_gpu_count()
A : Any = get_torch_dist_unique_port()
A : Optional[Any] = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
A : Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
else:
A : List[Any] = ["run_translation.py"] + args
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
main()
return output_dir | 17 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
__SCREAMING_SNAKE_CASE = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
__SCREAMING_SNAKE_CASE = {
'jukebox': 512,
}
class lowerCamelCase_ ( __lowercase ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_LYRIC_TOKENS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any]=["v3", "v2", "v2"] , __lowerCamelCase : Any=5_12 , __lowerCamelCase : str=5 , __lowerCamelCase : Tuple="<|endoftext|>" , **__lowerCamelCase : Optional[int] , ) -> Any:
A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
super().__init__(
unk_token=__lowerCamelCase , n_genres=__lowerCamelCase , version=__lowerCamelCase , max_n_lyric_tokens=__lowerCamelCase , **__lowerCamelCase , )
A : Optional[int] = version
A : int = max_n_lyric_tokens
A : Optional[int] = n_genres
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
A : List[str] = json.load(__lowerCamelCase )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
A : Dict = json.load(__lowerCamelCase )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
A : List[Any] = json.load(__lowerCamelCase )
A : Tuple = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 we had n_vocab=80, and in v3 we missed '+' so n_vocab=79 characters.
if len(self.lyrics_encoder ) == 79:
A : Optional[Any] = oov.replace(r"\-'" , r"\-+'" )
A : Optional[int] = regex.compile(__lowerCamelCase )
A : Optional[int] = {v: k for k, v in self.artists_encoder.items()}
A : Union[str, Any] = {v: k for k, v in self.genres_encoder.items()}
A : Any = {v: k for k, v in self.lyrics_encoder.items()}
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple ) -> Tuple:
A : Union[str, Any] = [self.artists_encoder.get(__lowerCamelCase , 0 ) for artist in list_artists]
for genres in range(len(__lowerCamelCase ) ):
A : List[Any] = [self.genres_encoder.get(__lowerCamelCase , 0 ) for genre in list_genres[genres]]
A : Optional[Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
A : Any = [[self.lyrics_encoder.get(__lowerCamelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
return list(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , **__lowerCamelCase : List[str] ) -> List[str]:
A : List[str] = self.prepare_for_tokenization(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : List[Any] = self._tokenize(__lowerCamelCase )
return artist, genre, lyrics
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
A : int = artists[idx].lower()
A : str = [genres[idx].lower()]
else:
A : List[Any] = self._normalize(artists[idx] ) + ".v2"
A : List[str] = [
self._normalize(__lowerCamelCase ) + ".v2" for genre in genres[idx].split("_" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
A : Union[str, Any] = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
A : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
A : List[Any] = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )}
A : List[str] = 0
A : Optional[int] = len(__lowerCamelCase ) + 1
A : int = self.vocab
A : List[Any] = {v: k for k, v in self.vocab.items()}
A : int = ""
else:
A : Optional[Any] = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
A : List[Any] = self._run_strip_accents(__lowerCamelCase )
A : str = lyrics.replace("\\" , "\n" )
A : Optional[int] = self.out_of_vocab.sub("" , __lowerCamelCase ), [], []
return artists, genres, lyrics
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : List[str] ) -> Union[str, Any]:
A : Any = unicodedata.normalize("NFD" , __lowerCamelCase )
A : List[str] = []
for char in text:
A : List[str] = unicodedata.category(__lowerCamelCase )
if cat == "Mn":
continue
output.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str ) -> str:
A : Any = (
[chr(__lowerCamelCase ) for i in range(ord("a" ) , ord("z" ) + 1 )]
+ [chr(__lowerCamelCase ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
+ [chr(__lowerCamelCase ) for i in range(ord("0" ) , ord("9" ) + 1 )]
+ ["."]
)
A : Tuple = frozenset(__lowerCamelCase )
A : Dict = re.compile(r"_+" )
A : Optional[int] = "".join([c if c in accepted else "_" for c in text.lower()] )
A : Tuple = pattern.sub("_" , __lowerCamelCase ).strip("_" )
return text
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[str] ) -> str:
return " ".join(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : bool = False ) -> Dict:
# Convert to TensorType
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Dict = TensorType(__lowerCamelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
import tensorflow as tf
A : int = tf.constant
A : Optional[int] = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
import torch
A : Optional[int] = torch.tensor
A : int = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
import jax.numpy as jnp # noqa: F811
A : Dict = jnp.array
A : Dict = _is_jax
else:
A : Union[str, Any] = np.asarray
A : Union[str, Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
A : List[str] = [inputs]
if not is_tensor(__lowerCamelCase ):
A : int = as_tensor(__lowerCamelCase )
except: # noqa E722
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
return inputs
def __call__( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : str="" , __lowerCamelCase : Optional[Any]="pt" ) -> BatchEncoding:
A : Any = [0, 0, 0]
A : Dict = [artist] * len(self.version )
A : Any = [genres] * len(self.version )
A : Optional[Any] = self.tokenize(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : List[str] = self._convert_token_to_id(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Union[str, Any] = [-INFINITY] * len(full_tokens[-1] )
A : Dict = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__lowerCamelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A : Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=__lowerCamelCase ) )
A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=__lowerCamelCase ) )
A : Tuple = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__lowerCamelCase ) )
return (artists_file, genres_file, lyrics_file)
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A : Tuple = self.artists_decoder.get(__lowerCamelCase )
A : Dict = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index]
A : str = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index]
return artist, genres, lyrics | 705 |
from collections.abc import Sequence
def UpperCAmelCase ( _lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
A : Dict = nums[0]
for i in range(1 , len(_lowerCamelCase ) ):
A : Tuple = nums[i]
A : List[Any] = max(_lowerCamelCase , ans + num , _lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__SCREAMING_SNAKE_CASE = int(input("""Enter number of elements : """).strip())
__SCREAMING_SNAKE_CASE = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array)) | 17 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__SCREAMING_SNAKE_CASE = NewType("""DataClass""", Any)
__SCREAMING_SNAKE_CASE = NewType("""DataClassType""", Any)
def UpperCAmelCase ( _lowerCamelCase ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[Any] = {str(_SCREAMING_SNAKE_CASE ): choice for choice in choices}
return lambda _lowerCamelCase : str_to_choice.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( *,
_lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = dataclasses.MISSING , _lowerCamelCase = dataclasses.MISSING , _lowerCamelCase = None , **_lowerCamelCase , ):
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
A : List[Any] = {}
if aliases is not None:
A : str = aliases
if help is not None:
A : Optional[int] = help
return dataclasses.field(metadata=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , default_factory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = 42
def __init__( self : Dict , __lowerCamelCase : Union[DataClassType, Iterable[DataClassType]] , **__lowerCamelCase : str ) -> List[str]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
A : Optional[int] = ArgumentDefaultsHelpFormatter
super().__init__(**__UpperCamelCase )
if dataclasses.is_dataclass(__UpperCamelCase ):
A : List[Any] = [dataclass_types]
A : List[Any] = list(__UpperCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__UpperCamelCase )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( __lowerCamelCase : ArgumentParser , __lowerCamelCase : dataclasses.Field ) -> List[str]:
A : Any = F"""--{field.name}"""
A : List[str] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __UpperCamelCase ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
A : Union[str, Any] = kwargs.pop("aliases" , [] )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A : Union[str, Any] = [aliases]
A : Optional[Any] = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__UpperCamelCase , "UnionType" ) and isinstance(__UpperCamelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__UpperCamelCase ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F""" Problem encountered in field \'{field.name}\'.""" )
if type(__UpperCamelCase ) not in field.type.__args__:
# filter `str` in Union
A : Any = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
A : Tuple = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
A : Any = (
field.type.__args__[0] if isinstance(__UpperCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
A : int = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
A : int = {}
if origin_type is Literal or (isinstance(field.type , __UpperCamelCase ) and issubclass(field.type , __UpperCamelCase )):
if origin_type is Literal:
A : Any = field.type.__args__
else:
A : Optional[int] = [x.value for x in field.type]
A : Optional[Any] = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
A : Union[str, Any] = field.default
else:
A : Tuple = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
A : Any = copy(__UpperCamelCase )
# Hack because type=bool in argparse does not behave as we want.
A : int = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
A : Optional[Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
A : str = default
# This tells argparse we accept 0 or 1 value after --field_name
A : Any = "?"
# This is the value that will get picked if we do --field_name (without value)
A : Optional[Any] = True
elif isclass(__UpperCamelCase ) and issubclass(__UpperCamelCase , __UpperCamelCase ):
A : Union[str, Any] = field.type.__args__[0]
A : Optional[int] = "+"
if field.default_factory is not dataclasses.MISSING:
A : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
A : List[str] = True
else:
A : Union[str, Any] = field.type
if field.default is not dataclasses.MISSING:
A : int = field.default
elif field.default_factory is not dataclasses.MISSING:
A : Optional[Any] = field.default_factory()
else:
A : List[str] = True
parser.add_argument(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
A : Tuple = False
parser.add_argument(F"""--no_{field.name}""" , action="store_false" , dest=field.name , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : DataClassType ) -> List[str]:
if hasattr(__UpperCamelCase , "_argument_group_name" ):
A : Optional[Any] = self.add_argument_group(dtype._argument_group_name )
else:
A : Optional[int] = self
try:
A : str = get_type_hints(__UpperCamelCase )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__UpperCamelCase ):
A : List[str] = ".".join(map(__UpperCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__UpperCamelCase ):
if not field.init:
continue
A : Union[str, Any] = type_hints[field.name]
self._parse_dataclass_field(__UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple=None , __lowerCamelCase : int=False , __lowerCamelCase : int=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
A : Any = []
if args_filename:
args_files.append(Path(__UpperCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
A : Dict = ArgumentParser()
args_file_parser.add_argument(__UpperCamelCase , type=__UpperCamelCase , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
A , A : Optional[int] = args_file_parser.parse_known_args(args=__UpperCamelCase )
A : List[Any] = vars(__UpperCamelCase ).get(args_file_flag.lstrip("-" ) , __UpperCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(__UpperCamelCase ) for p in cmd_args_file_paths] )
A : Optional[int] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
A : Tuple = file_args + args if args is not None else file_args + sys.argv[1:]
A , A : int = self.parse_known_args(args=__UpperCamelCase )
A : Dict = []
for dtype in self.dataclass_types:
A : Optional[Any] = {f.name for f in dataclasses.fields(__UpperCamelCase ) if f.init}
A : Any = {k: v for k, v in vars(__UpperCamelCase ).items() if k in keys}
for k in keys:
delattr(__UpperCamelCase , __UpperCamelCase )
A : Optional[int] = dtype(**__UpperCamelCase )
outputs.append(__UpperCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__UpperCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Dict[str, Any] , __lowerCamelCase : bool = False ) -> Tuple[DataClass, ...]:
A : Tuple = set(args.keys() )
A : Optional[int] = []
for dtype in self.dataclass_types:
A : Any = {f.name for f in dataclasses.fields(__UpperCamelCase ) if f.init}
A : Optional[Any] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
A : Tuple = dtype(**__UpperCamelCase )
outputs.append(__UpperCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(__UpperCamelCase )}""" )
return tuple(__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Tuple[DataClass, ...]:
with open(Path(__UpperCamelCase ) , encoding="utf-8" ) as open_json_file:
A : Optional[int] = json.loads(open_json_file.read() )
A : int = self.parse_dict(__UpperCamelCase , allow_extra_keys=__UpperCamelCase )
return tuple(__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Tuple[DataClass, ...]:
A : Optional[int] = self.parse_dict(yaml.safe_load(Path(__UpperCamelCase ).read_text() ) , allow_extra_keys=__UpperCamelCase )
return tuple(__UpperCamelCase ) | 706 |
from math import sqrt
def UpperCAmelCase ( _lowerCamelCase = 100_0000 ):
A : int = 0
A : int = 0
A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCamelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""") | 17 | 0 |
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase = " " ):
A : str = []
A : str = 0
for index, char in enumerate(_lowerCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
A : List[Any] = index + 1
elif index + 1 == len(_lowerCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod() | 707 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__SCREAMING_SNAKE_CASE = """."""
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
with open(doctest_file_path) as fp:
for line in fp:
__SCREAMING_SNAKE_CASE = line.strip()
__SCREAMING_SNAKE_CASE = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__SCREAMING_SNAKE_CASE = """\n""".join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""") | 17 | 0 |
import heapq
import sys
import numpy as np
__SCREAMING_SNAKE_CASE = tuple[int, int]
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int ) -> Optional[int]:
A : Union[str, Any] = []
A : Union[str, Any] = set()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]:
return len(self.elements ) == 0
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] ) -> Any:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(UpperCamelCase__ )
else:
# update
# print("update", item)
A : int = []
(A) : Dict = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
(A) : Dict = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Dict ) -> Union[str, Any]:
if item in self.set:
self.set.remove(UpperCamelCase__ )
A : Optional[Any] = []
(A) : List[str] = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
(A) : Union[str, Any] = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
return self.elements[0][1]
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
(A) : Any = heapq.heappop(self.elements )
self.set.remove(UpperCamelCase__ )
return (priority, item)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Any = np.array(lowerCAmelCase__ )
A : List[str] = np.array(lowerCAmelCase__ )
return np.linalg.norm(a - b )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
return consistent_heuristic(lowerCAmelCase__ , lowerCAmelCase__ ) // t
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : List[str] = g_function[start] + Wa * heuristics[i](lowerCAmelCase__ , lowerCAmelCase__ )
return ans
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = np.chararray((n, n) )
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
A : str = '*'
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
if (j, (n - 1) - i) in blocks:
A : Any = '#'
A : str = '-'
A : Optional[Any] = back_pointer[goal]
while x != start:
(A) : Any = x
# print(x)
A : int = '-'
A : Optional[Any] = back_pointer[x]
A : Optional[Any] = '-'
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
A : Optional[int] = back_pointer[goal]
while x != start:
print(lowerCAmelCase__ , end=" " )
A : Tuple = back_pointer[x]
print(lowerCAmelCase__ )
sys.exit()
def UpperCAmelCase ( _lowerCamelCase ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
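# Relax the four grid neighbours of s: update their g-values and back-pointers, then push them onto the anchor and inadmissible open lists.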
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
for itera in range(lowerCAmelCase__ ):
open_list[itera].remove_element(lowerCAmelCase__ )
# print("s", s)
# print("j", j)
(A) : Any = s
A : Dict = (x - 1, y)
A : Tuple = (x + 1, y)
A : int = (x, y + 1)
A : Tuple = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCAmelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCAmelCase__ )
A : List[Any] = -1
A : List[str] = float("inf" )
if valid(lowerCAmelCase__ ) and g_function[neighbours] > g_function[s] + 1:
A : List[Any] = g_function[s] + 1
A : int = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCAmelCase__ , key(lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCAmelCase__ ):
if key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) <= Wa * key(
lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ ):
open_list[j].put(
lowerCAmelCase__ , key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
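# Hard-coded obstacle cells for the 20x20 demo grid used below.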
def UpperCAmelCase ( ):
A : Union[str, Any] = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__SCREAMING_SNAKE_CASE = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__SCREAMING_SNAKE_CASE = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__SCREAMING_SNAKE_CASE = make_common_ground()
__SCREAMING_SNAKE_CASE = blocks_blk
# hyper parameters
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 20
__SCREAMING_SNAKE_CASE = 3 # one consistent and two other inconsistent
# start and end destination
__SCREAMING_SNAKE_CASE = (0, 0)
__SCREAMING_SNAKE_CASE = (n - 1, n - 1)
__SCREAMING_SNAKE_CASE = 1
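# Multi-Heuristic A*: one anchor queue (heuristic 0) plus inadmissible queues that share g-values and back-pointers.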
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Tuple = {start: 0, goal: float("inf" )}
A : Union[str, Any] = {start: -1, goal: -1}
A : Union[str, Any] = []
A : List[Any] = set()
for i in range(lowerCAmelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCAmelCase__ , key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
A : list[int] = []
A : list[int] = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , lowerCAmelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
A : Dict = open_list[i].top_show()
visited.add(lowerCAmelCase__ )
expand_state(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
close_list_inad.append(lowerCAmelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
A : Union[str, Any] = open_list[0].top_show()
visited.add(lowerCAmelCase__ )
expand_state(
lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
close_list_anchor.append(lowerCAmelCase__ )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCAmelCase__ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic) | 708 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str:
A : List[Any] = parent
A : Optional[int] = batch_size
A : Any = image_size
A : Optional[Any] = patch_size
A : Optional[Any] = num_channels
A : Tuple = is_training
A : Optional[Any] = use_labels
A : Union[str, Any] = hidden_size
A : Tuple = num_hidden_layers
A : Union[str, Any] = num_attention_heads
A : Union[str, Any] = intermediate_size
A : Any = hidden_act
A : Tuple = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Tuple = initializer_range
A : List[Any] = scope
A : Optional[int] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A : List[str] = (image_size // patch_size) ** 2
A : List[str] = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[Any] = None
if self.use_labels:
A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int:
A : Optional[int] = DeiTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any:
A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : List[str] = 1
A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict:
A : str = self.type_sequence_label_size
A : List[str] = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Any = 1
A : str = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Dict = self.prepare_config_and_inputs()
        A , A , A : Tuple = config_and_inputs
A : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
A : str = DeiTModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Any = [*signature.parameters.keys()]
A : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str:
A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
if not self.model_tester.is_training:
return
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A : Union[str, Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Dict = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A : Tuple = False
A : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A : List[str] = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
A : Tuple = problem_type["title"]
A : Optional[Any] = problem_type["num_labels"]
A : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
A : int = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
A : Optional[Any] = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = DeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : List[str] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : str = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
A : Dict = self.default_image_processor
A : Optional[int] = prepare_img()
A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" )
A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A : List[str] = model(__lowerCamelCase ) | 17 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( a__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : str , ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , )
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Dict = "auto" ) -> Any:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]:
self.enable_attention_slicing(lowerCamelCase_ )
@torch.no_grad()
def __call__( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str = 5_12 , __lowerCamelCase : str = 5_12 , __lowerCamelCase : Tuple = 50 , __lowerCamelCase : Optional[int] = 7.5 , __lowerCamelCase : Tuple = None , __lowerCamelCase : List[Any] = 1 , __lowerCamelCase : Optional[Any] = 0.0 , __lowerCamelCase : List[Any] = None , __lowerCamelCase : int = None , __lowerCamelCase : Union[str, Any] = "pil" , __lowerCamelCase : List[Any] = True , __lowerCamelCase : int = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : Dict = None , **__lowerCamelCase : List[Any] , ) -> Any:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
A : str = 1
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
A : Optional[int] = len(lowerCamelCase_ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase_ )}.""" )
# get prompt text embeddings
A : str = self.tokenizer(
lowerCamelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
A : str = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A , A , A : Dict = text_embeddings.shape
A : Union[str, Any] = text_embeddings.repeat(1 , lowerCamelCase_ , 1 )
A : int = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A : Optional[int] = 42
if negative_prompt is None:
A : int = [""]
elif type(lowerCamelCase_ ) is not type(lowerCamelCase_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase_ )} !="""
F""" {type(lowerCamelCase_ )}.""" )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
A : Union[str, Any] = [negative_prompt]
elif batch_size != len(lowerCamelCase_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
A : Dict = negative_prompt
A : Optional[int] = text_input_ids.shape[-1]
A : Dict = self.tokenizer(
lowerCamelCase_ , padding="max_length" , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors="pt" , )
A : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A : Dict = uncond_embeddings.shape[1]
A : Optional[Any] = uncond_embeddings.repeat(lowerCamelCase_ , lowerCamelCase_ , 1 )
A : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A : List[str] = torch.randn(
lowerCamelCase_ , generator=lowerCamelCase_ , device="cpu" , dtype=lowerCamelCase_ ).to(self.device )
A : Union[str, Any] = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device="cpu" , dtype=lowerCamelCase_ ).to(
self.device )
else:
A : str = torch.randn(
lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_ )
A : Any = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
A : Optional[Any] = latents_reference.to(self.device )
A : Optional[Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A : Optional[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
A : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
A : Tuple = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A : Any = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A : str = 0 if dx < 0 else dx
A : Tuple = 0 if dy < 0 else dy
A : List[str] = max(-dx , 0 )
A : List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A : Tuple = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A : Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A : Dict = {}
if accepts_eta:
A : Dict = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
A : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A : str = self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# predict the noise residual
A : Union[str, Any] = self.unet(lowerCamelCase_ , lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ ).sample
# perform guidance
if do_classifier_free_guidance:
A , A : Dict = noise_pred.chunk(2 )
A : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A : Any = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
A : List[Any] = 1 / 0.18215 * latents
A : Optional[int] = self.vae.decode(lowerCamelCase_ ).sample
A : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A : Optional[Any] = self.feature_extractor(self.numpy_to_pil(lowerCamelCase_ ) , return_tensors="pt" ).to(
self.device )
A , A : Any = self.safety_checker(
images=lowerCamelCase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A : Optional[int] = None
if output_type == "pil":
A : Dict = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCamelCase_ , nsfw_content_detected=lowerCamelCase_ ) | 709 |
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__SCREAMING_SNAKE_CASE = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__SCREAMING_SNAKE_CASE = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]:
A : str = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
return {"recall": float(__lowerCamelCase ) if score.size == 1 else score} | 17 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
__SCREAMING_SNAKE_CASE = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def UpperCAmelCase ( _lowerCamelCase ):
A : Dict = EfficientNetConfig()
A : List[str] = CONFIG_MAP[model_name]["hidden_dim"]
A : str = CONFIG_MAP[model_name]["width_coef"]
A : Tuple = CONFIG_MAP[model_name]["depth_coef"]
A : Tuple = CONFIG_MAP[model_name]["image_size"]
A : List[Any] = CONFIG_MAP[model_name]["dropout_rate"]
A : str = CONFIG_MAP[model_name]["dw_padding"]
A : Optional[int] = "huggingface/label-files"
A : Optional[int] = "imagenet-1k-id2label.json"
A : int = 1000
A : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
A : Any = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
A : List[str] = idalabel
A : Dict = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase ( ):
A : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
A : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
def UpperCAmelCase ( _lowerCamelCase ):
A : List[str] = CONFIG_MAP[model_name]["image_size"]
A : str = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=_lowerCamelCase , )
return preprocessor
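# Build the list of (TF name, HF name) pairs covering the stem, every block, and the top layers.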
def UpperCAmelCase ( _lowerCamelCase ):
A : List[str] = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
A : Tuple = sorted(set(_lowerCamelCase ) )
A : List[str] = len(_lowerCamelCase )
A : int = {b: str(_lowerCamelCase ) for b, i in zip(_lowerCamelCase , range(_lowerCamelCase ) )}
A : Union[str, Any] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
A : Optional[int] = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
A : Any = {}
for item in rename_keys:
if item[0] in original_param_names:
A : Optional[Any] = "efficientnet." + item[1]
A : Optional[int] = "classifier.weight"
A : List[Any] = "classifier.bias"
return key_mapping
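# Copy each TF weight into the HF state dict, permuting conv and depthwise kernels to PyTorch layout.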
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for key, value in tf_params.items():
if "normalization" in key:
continue
A : Dict = key_mapping[key]
if "_conv" in key and "kernel" in key:
A : List[Any] = torch.from_numpy(_lowerCamelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
A : int = torch.from_numpy(_lowerCamelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
A : Dict = torch.from_numpy(np.transpose(_lowerCamelCase ) )
else:
A : str = torch.from_numpy(_lowerCamelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_lowerCamelCase )
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A : Tuple = model_classes[model_name](
include_top=_lowerCamelCase , weights="imagenet" , input_tensor=_lowerCamelCase , input_shape=_lowerCamelCase , pooling=_lowerCamelCase , classes=1000 , classifier_activation="softmax" , )
A : Tuple = original_model.trainable_variables
A : Optional[Any] = original_model.non_trainable_variables
A : List[str] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
A : Optional[int] = param.numpy()
A : Tuple = list(tf_params.keys() )
# Load HuggingFace model
A : List[str] = get_efficientnet_config(_lowerCamelCase )
A : List[str] = EfficientNetForImageClassification(_lowerCamelCase ).eval()
A : Union[str, Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
A : List[Any] = rename_keys(_lowerCamelCase )
replace_params(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Initialize preprocessor and preprocess input image
A : Tuple = convert_image_processor(_lowerCamelCase )
A : List[str] = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
A : Dict = hf_model(**_lowerCamelCase )
A : List[Any] = outputs.logits.detach().numpy()
# Original model inference
A : int = False
A : Optional[int] = CONFIG_MAP[model_name]["image_size"]
A : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
A : str = image.img_to_array(_lowerCamelCase )
A : Dict = np.expand_dims(_lowerCamelCase , axis=0 )
A : List[Any] = original_model.predict(_lowerCamelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(_lowerCamelCase ):
os.mkdir(_lowerCamelCase )
# Save converted model and image processor
hf_model.save_pretrained(_lowerCamelCase )
preprocessor.save_pretrained(_lowerCamelCase )
if push_to_hub:
# Push model and image processor to hub
print(f"""Pushing converted {model_name} to the hub...""" )
A : Optional[int] = f"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_lowerCamelCase )
hf_model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) | 710 |
from collections import deque
from .hash_table import HashTable
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Optional[int]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
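    # Each slot stores a deque of values so several entries can share the same bucket.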
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
A : Optional[Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
A : Dict = self.values[key]
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
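    # Keep using this bucket unless it is full and no empty slot remains; otherwise defer to the parent resolution.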
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> Optional[int]:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase ) | 17 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__SCREAMING_SNAKE_CASE = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__SCREAMING_SNAKE_CASE = {"allegro/herbert-base-cased": 514}
__SCREAMING_SNAKE_CASE = {}
class lowerCamelCase_ ( __UpperCAmelCase ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = HerbertTokenizer
def __init__( self : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : int=None , __lowerCamelCase : int=None , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Tuple="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : Optional[int]="</s>" , **__lowerCamelCase : str , ) -> str:
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
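    # Single sequence: <s> tokens </s>; sequence pair: <s> tokens_a </s> tokens_b </s>.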
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> Any:
A : Optional[int] = [self.cls_token_id]
A : Optional[Any] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
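    # Token type ids are 0 for the first segment (including special tokens) and 1 for the second.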
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> int:
A : List[Any] = [self.sep_token_id]
A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Optional[Any]:
A : int = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ ) | 711 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict:
A : Optional[Any] = 4
A : List[str] = 32
A : Any = (32, 32)
A : str = torch.manual_seed(0 )
A : int = torch.device(__lowerCamelCase )
A : List[str] = (batch_size, num_channels) + sizes
A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase )
A : int = {"hidden_states": hidden_states}
if include_temb:
A : Any = 1_28
A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase )
if include_res_hidden_states_tuple:
A : str = torch.manual_seed(1 )
A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),)
if include_encoder_hidden_states:
A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase )
if include_skip_sample:
A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : Dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 1_28,
}
if self.block_type == "up":
A : Dict = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A : str = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A , A : str = self.prepare_init_args_and_inputs_for_common()
A : List[Any] = self.block_class(**__lowerCamelCase )
unet_block.to(__lowerCamelCase )
unet_block.eval()
with torch.no_grad():
A : int = unet_block(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Union[str, Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
A : Any = output[0, -1, -3:, -3:]
A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase )
assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.prepare_init_args_and_inputs_for_common()
A : str = self.block_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Optional[int] = model(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Optional[Any] = output[0]
A : List[str] = torch.device(__lowerCamelCase )
A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase )
A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase )
loss.backward() | 17 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__SCREAMING_SNAKE_CASE = 0B101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__SCREAMING_SNAKE_CASE = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ) -> Optional[Any]:
A : Optional[int] = WATERMARK_BITS
A : Dict = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark )
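    # Embed the watermark bits into images of at least 256 px using the DWT-DCT encoder.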
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str] ) -> Any:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
A : Any = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A : int = [self.encoder.encode(_UpperCAmelCase , "dwtDct" ) for image in images]
A : Any = torch.from_numpy(np.array(_UpperCAmelCase ) ).permute(0 , 3 , 1 , 2 )
A : Dict = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0 )
return images
| 712 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
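# Stops generation once the sequence reaches max_length; warns if the model's position-embedding limit would be exceeded.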
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None ) -> List[Any]:
A : str = max_length
A : Optional[int] = max_position_embeddings
@add_start_docstrings(__lowerCamelCase )
def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool:
A : List[Any] = input_ids.shape[-1]
A : Any = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> List[Any]:
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"with `max_length = start_length + max_new_tokens` instead." , __lowerCamelCase , )
A : str = start_length
A : Optional[Any] = max_new_tokens
A : Dict = start_length + max_new_tokens
@add_start_docstrings(__lowerCamelCase )
def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return input_ids.shape[-1] >= self.max_length
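# Stops generation once max_time seconds have elapsed since the initial timestamp.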
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : float , __lowerCamelCase : Optional[float] = None ) -> List[Any]:
A : str = max_time
A : Dict = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(__lowerCamelCase )
def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool:
return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return stopping_criterium.max_length
return None
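# Reconcile a StoppingCriteriaList with a requested max_length: warn on a mismatch and append a MaxLengthCriteria when none is present.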
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[int] = stopping_criteria.max_length
A : Any = deepcopy(_lowerCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) )
return new_stopping_criteria | 17 | 0 |